linux-2.6-microblaze.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
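/* Read MAC statistics with the fixed 21-descriptor HCLGE_OPC_STATS_MAC
 * command (the fallback used when the firmware cannot report the stats
 * register count) and accumulate the returned 64-bit counters into
 * hdev->hw_stats.mac_stats.
 */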
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
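/* Read all MAC statistics with HCLGE_OPC_STATS_MAC_ALL, using the
 * descriptor count previously queried from the firmware, and accumulate
 * the returned 64-bit counters into hdev->hw_stats.mac_stats.
 */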
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
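        /* The first descriptor accounts for three stats registers; every
         * remaining group of up to four registers needs one more
         * descriptor, hence the rounding below.
         */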
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
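        /* one TX and one RX packet counter per TQP */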
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: supported only in GE mode
624          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
625          * phy: supported only when a PHY device is present on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Record whether this pf is the main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
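/* Poll the firmware (up to five times) until it reports the PF state,
 * then record whether this PF is the main PF.
 */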
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* The PF should have both NIC and RoCE vectors;
803                  * the NIC vectors are queued before the RoCE vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to support all speeds for GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
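        /* param[2] carries the low 32 bits of the MAC address and
         * param[3] the high 16 bits; combine them into one 48-bit value.
         */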
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* the length must be in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* the minimum number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Currently non-contiguous TCs are not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
1370 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
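/* Illustration only, not part of the driver: a minimal standalone sketch of
 * how hclge_alloc_vport() above splits the TQPs across vports.  Every vport
 * receives num_tqps / num_vport queue pairs, and the main vport (vport 0)
 * additionally takes the remainder, so all TQPs are assigned exactly once.
 * The example_ names and the numbers in the trailing comment are hypothetical.
 */
static inline void example_tqp_split(unsigned int num_tqps,
                                     unsigned int num_vport,
                                     unsigned int *tqp_per_vport,
                                     unsigned int *tqp_main_vport)
{
        *tqp_per_vport = num_tqps / num_vport;
        *tqp_main_vport = *tqp_per_vport + num_tqps % num_vport;
}

/* e.g. with the hypothetical values num_tqps = 32 and num_vport = 5:
 * tqp_per_vport = 6 and tqp_main_vport = 6 + 2 = 8, and 8 + 4 * 6 = 32.
 */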
1554
1555 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is allocated in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
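/* Illustration only, not part of the driver: a minimal sketch of how a per-TC
 * TX buffer size in bytes is packed into the 16-bit field sent by
 * hclge_cmd_alloc_tx_buff() above.  The size is expressed in 128-byte units
 * (right shift by HCLGE_BUF_SIZE_UNIT_SHIFT, i.e. 7) and bit 15 is set to
 * request the update.  The example_ name and the values in the trailing
 * comment are hypothetical.
 */
static inline unsigned short example_pack_tx_buf_field(unsigned int buf_size)
{
        return (unsigned short)((buf_size >> 7) | (1U << 15));
}

/* e.g. buf_size = 1024 bytes -> 1024 >> 7 = 8 units; with bit 15 set the
 * resulting field value is 0x8008.
 */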
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs that have a private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs that have a private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
1665 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
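/* Illustration only, not part of the driver: a minimal standalone sketch of
 * the shared-buffer sizing used by hclge_is_rx_buf_ok() above for the
 * DCB-capable case.  The shared buffer must cover both a fixed minimum of
 * 2 * aligned_mps + dv_buf_size and one aligned_mps per TC plus one extra,
 * rounded up to the 256-byte buffer unit.  The example_ names and the
 * numbers in the trailing comment are hypothetical.
 */
static inline unsigned int example_round_up_unit(unsigned int x)
{
        /* round up to the 256-byte HCLGE_BUF_SIZE_UNIT */
        return ((x + 255U) / 256U) * 256U;
}

static inline unsigned int example_shared_std(unsigned int mps,
                                              unsigned int tc_num,
                                              unsigned int dv_buf_size)
{
        unsigned int aligned_mps = example_round_up_unit(mps);
        unsigned int shared_buf_min = 2 * aligned_mps + dv_buf_size;
        unsigned int shared_buf_tc = tc_num * aligned_mps + aligned_mps;
        unsigned int needed = shared_buf_min > shared_buf_tc ?
                              shared_buf_min : shared_buf_tc;

        return example_round_up_unit(needed);
}

/* e.g. mps = 1500, tc_num = 4, dv_buf_size = 2048: aligned_mps = 1536,
 * shared_buf_min = 5120, shared_buf_tc = 7680, so shared_std = 7680 and the
 * layout only fits when rx_all >= rx_priv + 7680.
 */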
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for each enabled TC */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* clear private buffers starting from the last TC */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the private buffer of a TC without PFC */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* clear private buffers starting from the last TC */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Clear the private buffer of a PFC-enabled TC */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0 on success, a negative error code on failure
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
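/* Illustration only, not part of the driver: hclge_rx_buffer_calc() above
 * tries progressively less demanding buffer layouts and keeps the first one
 * that fits.  A generic sketch of that ordered-fallback pattern, using a
 * hypothetical context type and strategy callbacks (the example_ names are
 * assumptions, not the driver's API):
 */
struct example_buf_ctx;

typedef int (*example_buf_strategy_t)(struct example_buf_ctx *ctx);

static inline int example_try_buf_strategies(struct example_buf_ctx *ctx,
                                             const example_buf_strategy_t *tbl,
                                             unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++)
                if (tbl[i](ctx))        /* nonzero means this layout fits */
                        return 0;

        return -1;      /* nothing fits, analogous to -ENOMEM above */
}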
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Allocate the private buffer for each TC */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2160
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
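/* Illustration only, not part of the driver: the speed-to-register-field
 * mapping hidden in the switch statement of hclge_cfg_mac_speed_dup_hw()
 * above, gathered into a single table.  The field values are the ones used
 * by the switch; the table form is only an alternative sketch and relies on
 * the driver's own HCLGE_MAC_SPEED_* constants, with example_ names being
 * assumptions.
 */
struct example_speed_fw_map {
        int speed;              /* HCLGE_MAC_SPEED_* value */
        unsigned int fw_val;    /* value for the HCLGE_CFG_SPEED field */
};

static const struct example_speed_fw_map example_speed_fw_tbl[] = {
        { HCLGE_MAC_SPEED_10M, 6 },  { HCLGE_MAC_SPEED_100M, 7 },
        { HCLGE_MAC_SPEED_1G, 0 },   { HCLGE_MAC_SPEED_10G, 1 },
        { HCLGE_MAC_SPEED_25G, 2 },  { HCLGE_MAC_SPEED_40G, 3 },
        { HCLGE_MAC_SPEED_50G, 4 },  { HCLGE_MAC_SPEED_100G, 5 },
};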
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "config mac speed and duplex failed, ret = %d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2431             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2432                 schedule_work(&hdev->rst_service_task);
2433 }
2434
2435 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 {
2437         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2438             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2439             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2440                 (void)schedule_work(&hdev->service_task);
2441 }
2442
2443 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 {
2445         struct hclge_link_status_cmd *req;
2446         struct hclge_desc desc;
2447         int link_status;
2448         int ret;
2449
2450         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2451         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452         if (ret) {
2453                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2454                         ret);
2455                 return ret;
2456         }
2457
2458         req = (struct hclge_link_status_cmd *)desc.data;
2459         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460
2461         return !!link_status;
2462 }
2463
2464 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2465 {
2466         int mac_state;
2467         int link_stat;
2468
2469         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2470                 return 0;
2471
2472         mac_state = hclge_get_mac_link_status(hdev);
2473
2474         if (hdev->hw.mac.phydev) {
2475                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2476                         link_stat = mac_state &
2477                                 hdev->hw.mac.phydev->link;
2478                 else
2479                         link_stat = 0;
2480
2481         } else {
2482                 link_stat = mac_state;
2483         }
2484
2485         return !!link_stat;
2486 }
2487
2488 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 {
2490         struct hnae3_client *rclient = hdev->roce_client;
2491         struct hnae3_client *client = hdev->nic_client;
2492         struct hnae3_handle *rhandle;
2493         struct hnae3_handle *handle;
2494         int state;
2495         int i;
2496
2497         if (!client)
2498                 return;
2499         state = hclge_get_mac_phy_link(hdev);
2500         if (state != hdev->hw.mac.link) {
2501                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2502                         handle = &hdev->vport[i].nic;
2503                         client->ops->link_status_change(handle, state);
2504                         hclge_config_mac_tnl_int(hdev, state);
2505                         rhandle = &hdev->vport[i].roce;
2506                         if (rclient && rclient->ops->link_status_change)
2507                                 rclient->ops->link_status_change(rhandle,
2508                                                                  state);
2509                 }
2510                 hdev->hw.mac.link = state;
2511         }
2512 }
2513
2514 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 {
2516         /* update fec ability by speed */
2517         hclge_convert_setting_fec(mac);
2518
2519         /* firmware cannot identify the backplane type; the media type
2520          * read from the configuration helps to determine it
2521          */
2522         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2523             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2524                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2525         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2526                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527
2528         if (mac->support_autoneg) {
2529                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2530                 linkmode_copy(mac->advertising, mac->supported);
2531         } else {
2532                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533                                    mac->supported);
2534                 linkmode_zero(mac->advertising);
2535         }
2536 }
2537
2538 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 {
2540         struct hclge_sfp_info_cmd *resp = NULL;
2541         struct hclge_desc desc;
2542         int ret;
2543
2544         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2545         resp = (struct hclge_sfp_info_cmd *)desc.data;
2546         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547         if (ret == -EOPNOTSUPP) {
2548                 dev_warn(&hdev->pdev->dev,
2549                          "IMP does not support getting SFP speed %d\n", ret);
2550                 return ret;
2551         } else if (ret) {
2552                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2553                 return ret;
2554         }
2555
2556         *speed = le32_to_cpu(resp->speed);
2557
2558         return 0;
2559 }
2560
2561 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 {
2563         struct hclge_sfp_info_cmd *resp;
2564         struct hclge_desc desc;
2565         int ret;
2566
2567         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2568         resp = (struct hclge_sfp_info_cmd *)desc.data;
2569
2570         resp->query_type = QUERY_ACTIVE_SPEED;
2571
2572         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573         if (ret == -EOPNOTSUPP) {
2574                 dev_warn(&hdev->pdev->dev,
2575                          "IMP does not support getting SFP info %d\n", ret);
2576                 return ret;
2577         } else if (ret) {
2578                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2579                 return ret;
2580         }
2581
2582         mac->speed = le32_to_cpu(resp->speed);
2583         /* if resp->speed_ability is 0, it means the firmware is an old
2584          * version, so do not update these parameters
2585          */
2586         if (resp->speed_ability) {
2587                 mac->module_type = le32_to_cpu(resp->module_type);
2588                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2589                 mac->autoneg = resp->autoneg;
2590                 mac->support_autoneg = resp->autoneg_ability;
2591                 if (!resp->active_fec)
2592                         mac->fec_mode = 0;
2593                 else
2594                         mac->fec_mode = BIT(resp->active_fec);
2595         } else {
2596                 mac->speed_type = QUERY_SFP_SPEED;
2597         }
2598
2599         return 0;
2600 }
2601
2602 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 {
2604         struct hclge_mac *mac = &hdev->hw.mac;
2605         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2606         int ret;
2607
2608         /* get the port info from SFP cmd if not copper port */
2609         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2610                 return 0;
2611
2612         /* if IMP does not support getting SFP/qSFP info, return directly */
2613         if (!hdev->support_sfp_query)
2614                 return 0;
2615
2616         if (hdev->pdev->revision >= 0x21)
2617                 ret = hclge_get_sfp_info(hdev, mac);
2618         else
2619                 ret = hclge_get_sfp_speed(hdev, &speed);
2620
2621         if (ret == -EOPNOTSUPP) {
2622                 hdev->support_sfp_query = false;
2623                 return ret;
2624         } else if (ret) {
2625                 return ret;
2626         }
2627
2628         if (hdev->pdev->revision >= 0x21) {
2629                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2630                         hclge_update_port_capability(mac);
2631                         return 0;
2632                 }
2633                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2634                                                HCLGE_MAC_FULL);
2635         } else {
2636                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2637                         return 0; /* do nothing if no SFP */
2638
2639                 /* must config full duplex for SFP */
2640                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2641         }
2642 }
2643
2644 static int hclge_get_status(struct hnae3_handle *handle)
2645 {
2646         struct hclge_vport *vport = hclge_get_vport(handle);
2647         struct hclge_dev *hdev = vport->back;
2648
2649         hclge_update_link_status(hdev);
2650
2651         return hdev->hw.mac.link;
2652 }
2653
2654 static void hclge_service_timer(struct timer_list *t)
2655 {
2656         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657
2658         mod_timer(&hdev->service_timer, jiffies + HZ);
2659         hdev->hw_stats.stats_timer++;
2660         hdev->fd_arfs_expire_timer++;
2661         hclge_task_schedule(hdev);
2662 }
2663
2664 static void hclge_service_complete(struct hclge_dev *hdev)
2665 {
2666         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667
2668         /* Flush memory before next watchdog */
2669         smp_mb__before_atomic();
2670         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2671 }
2672
2673 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 {
2675         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676
2677         /* fetch the events from their corresponding regs */
2678         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2679         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2680         msix_src_reg = hclge_read_dev(&hdev->hw,
2681                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682
2683         /* Assumption: if reset and mailbox events are reported together,
2684          * only the reset event is processed in this pass and the mailbox
2685          * events are deferred. Since the RX CMDQ event has not been
2686          * cleared this time, the hardware will raise another interrupt
2687          * just for the mailbox.
2688          */
2689
2690         /* check for vector0 reset event sources */
2691         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2692                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2693                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2694                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2695                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2696                 hdev->rst_stats.imp_rst_cnt++;
2697                 return HCLGE_VECTOR0_EVENT_RST;
2698         }
2699
2700         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2701                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2702                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2703                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2704                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2705                 hdev->rst_stats.global_rst_cnt++;
2706                 return HCLGE_VECTOR0_EVENT_RST;
2707         }
2708
2709         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2710                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2711                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2712                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2713                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2714                 hdev->rst_stats.core_rst_cnt++;
2715                 return HCLGE_VECTOR0_EVENT_RST;
2716         }
2717
2718         /* check for vector0 msix event source */
2719         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2720                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2721                         msix_src_reg);
2722                 return HCLGE_VECTOR0_EVENT_ERR;
2723         }
2724
2725         /* check for vector0 mailbox(=CMDQ RX) event source */
2726         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2727                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2728                 *clearval = cmdq_src_reg;
2729                 return HCLGE_VECTOR0_EVENT_MBX;
2730         }
2731
2732         /* print other vector0 event source */
2733         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2734                 cmdq_src_reg, msix_src_reg);
2735         return HCLGE_VECTOR0_EVENT_OTHER;
2736 }
2737
2738 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2739                                     u32 regclr)
2740 {
2741         switch (event_type) {
2742         case HCLGE_VECTOR0_EVENT_RST:
2743                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2744                 break;
2745         case HCLGE_VECTOR0_EVENT_MBX:
2746                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2747                 break;
2748         default:
2749                 break;
2750         }
2751 }
2752
2753 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2754 {
2755         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2756                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2758                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2759         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2760 }
2761
2762 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2763 {
2764         writel(enable ? 1 : 0, vector->addr);
2765 }
2766
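/* Vector 0 (misc) interrupt handler. The vector is masked while the cause is
 * decoded; reset and error events are handed to the reset service task, and
 * mailbox events to the mailbox task. Only the mailbox path clears the cause
 * and re-enables the vector here; the reset and error paths do so later, once
 * the event has actually been handled.
 */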
2767 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2768 {
2769         struct hclge_dev *hdev = data;
2770         u32 event_cause;
2771         u32 clearval;
2772
2773         hclge_enable_vector(&hdev->misc_vector, false);
2774         event_cause = hclge_check_event_cause(hdev, &clearval);
2775
2776         /* vector 0 interrupt is shared with reset and mailbox source events. */
2777         switch (event_cause) {
2778         case HCLGE_VECTOR0_EVENT_ERR:
2779                 /* we do not know what type of reset is required now. This can
2780                  * only be decided after we fetch the type of errors which
2781                  * caused this event. Therefore, we will do the following for now:
2782                  * 1. Assert an HNAE3_UNKNOWN_RESET type of reset. This means we
2783                  *    have deferred the choice of reset type.
2784                  * 2. Schedule the reset service task.
2785                  * 3. When the service task sees HNAE3_UNKNOWN_RESET it will
2786                  *    fetch the correct type of reset. This is done by first
2787                  *    decoding the types of errors.
2788                  */
2789                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2790                 /* fall through */
2791         case HCLGE_VECTOR0_EVENT_RST:
2792                 hclge_reset_task_schedule(hdev);
2793                 break;
2794         case HCLGE_VECTOR0_EVENT_MBX:
2795                 /* If we are here then either:
2796                  * 1. we are not handling any mbx task and none is
2797                  *    scheduled,
2798                  *                        OR
2799                  * 2. we are handling an mbx task but nothing more is
2800                  *    scheduled.
2801                  * In both cases we should schedule the mbx task, as there are
2802                  * more mbx messages reported by this interrupt.
2803                  */
2804                 hclge_mbx_task_schedule(hdev);
2805                 break;
2806         default:
2807                 dev_warn(&hdev->pdev->dev,
2808                          "received unknown or unhandled event of vector0\n");
2809                 break;
2810         }
2811
2812         /* clear the source of the interrupt if it is not caused by a reset */
2813         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2814                 hclge_clear_event_cause(hdev, event_cause, clearval);
2815                 hclge_enable_vector(&hdev->misc_vector, true);
2816         }
2817
2818         return IRQ_HANDLED;
2819 }
2820
2821 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2822 {
2823         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2824                 dev_warn(&hdev->pdev->dev,
2825                          "vector(vector_id %d) has been freed.\n", vector_id);
2826                 return;
2827         }
2828
2829         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2830         hdev->num_msi_left += 1;
2831         hdev->num_msi_used -= 1;
2832 }
2833
2834 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2835 {
2836         struct hclge_misc_vector *vector = &hdev->misc_vector;
2837
2838         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2839
2840         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2841         hdev->vector_status[0] = 0;
2842
2843         hdev->num_msi_left -= 1;
2844         hdev->num_msi_used += 1;
2845 }
2846
2847 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2848 {
2849         int ret;
2850
2851         hclge_get_misc_vector(hdev);
2852
2853         /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2854         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2855                           0, "hclge_misc", hdev);
2856         if (ret) {
2857                 hclge_free_vector(hdev, 0);
2858                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2859                         hdev->misc_vector.vector_irq);
2860         }
2861
2862         return ret;
2863 }
2864
2865 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2866 {
2867         free_irq(hdev->misc_vector.vector_irq, hdev);
2868         hclge_free_vector(hdev, 0);
2869 }
2870
2871 int hclge_notify_client(struct hclge_dev *hdev,
2872                         enum hnae3_reset_notify_type type)
2873 {
2874         struct hnae3_client *client = hdev->nic_client;
2875         u16 i;
2876
2877         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2878             !client)
2879                 return 0;
2880
2881         if (!client->ops->reset_notify)
2882                 return -EOPNOTSUPP;
2883
2884         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2885                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2886                 int ret;
2887
2888                 ret = client->ops->reset_notify(handle, type);
2889                 if (ret) {
2890                         dev_err(&hdev->pdev->dev,
2891                                 "notify nic client failed %d(%d)\n", type, ret);
2892                         return ret;
2893                 }
2894         }
2895
2896         return 0;
2897 }
2898
2899 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2900                                     enum hnae3_reset_notify_type type)
2901 {
2902         struct hnae3_client *client = hdev->roce_client;
2903         int ret = 0;
2904         u16 i;
2905
2906         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2907             !client)
2908                 return 0;
2909
2910         if (!client->ops->reset_notify)
2911                 return -EOPNOTSUPP;
2912
2913         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2914                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2915
2916                 ret = client->ops->reset_notify(handle, type);
2917                 if (ret) {
2918                         dev_err(&hdev->pdev->dev,
2919                                 "notify roce client failed %d(%d)",
2920                                 type, ret);
2921                         return ret;
2922                 }
2923         }
2924
2925         return ret;
2926 }
2927
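/* Poll hardware until the reset corresponding to hdev->reset_type has
 * completed: for FLR this means waiting for HNAE3_FLR_DONE to be set, for the
 * other types it means waiting for the relevant bit in the reset register to
 * clear. Gives up with -EBUSY after HCLGE_RESET_WAIT_CNT * 100 ms (about 20s).
 */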
2928 static int hclge_reset_wait(struct hclge_dev *hdev)
2929 {
2930 #define HCLGE_RESET_WAIT_MS     100
2931 #define HCLGE_RESET_WAIT_CNT    200
2932         u32 val, reg, reg_bit;
2933         u32 cnt = 0;
2934
2935         switch (hdev->reset_type) {
2936         case HNAE3_IMP_RESET:
2937                 reg = HCLGE_GLOBAL_RESET_REG;
2938                 reg_bit = HCLGE_IMP_RESET_BIT;
2939                 break;
2940         case HNAE3_GLOBAL_RESET:
2941                 reg = HCLGE_GLOBAL_RESET_REG;
2942                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2943                 break;
2944         case HNAE3_CORE_RESET:
2945                 reg = HCLGE_GLOBAL_RESET_REG;
2946                 reg_bit = HCLGE_CORE_RESET_BIT;
2947                 break;
2948         case HNAE3_FUNC_RESET:
2949                 reg = HCLGE_FUN_RST_ING;
2950                 reg_bit = HCLGE_FUN_RST_ING_B;
2951                 break;
2952         case HNAE3_FLR_RESET:
2953                 break;
2954         default:
2955                 dev_err(&hdev->pdev->dev,
2956                         "Wait for unsupported reset type: %d\n",
2957                         hdev->reset_type);
2958                 return -EINVAL;
2959         }
2960
2961         if (hdev->reset_type == HNAE3_FLR_RESET) {
2962                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2963                        cnt++ < HCLGE_RESET_WAIT_CNT)
2964                         msleep(HCLGE_RESET_WAIT_MS);
2965
2966                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2967                         dev_err(&hdev->pdev->dev,
2968                                 "flr wait timeout: %d\n", cnt);
2969                         return -EBUSY;
2970                 }
2971
2972                 return 0;
2973         }
2974
2975         val = hclge_read_dev(&hdev->hw, reg);
2976         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2977                 msleep(HCLGE_RESET_WAIT_MS);
2978                 val = hclge_read_dev(&hdev->hw, reg);
2979                 cnt++;
2980         }
2981
2982         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2983                 dev_warn(&hdev->pdev->dev,
2984                          "Wait for reset timeout: %d\n", hdev->reset_type);
2985                 return -EBUSY;
2986         }
2987
2988         return 0;
2989 }
2990
2991 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2992 {
2993         struct hclge_vf_rst_cmd *req;
2994         struct hclge_desc desc;
2995
2996         req = (struct hclge_vf_rst_cmd *)desc.data;
2997         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2998         req->dest_vfid = func_id;
2999
3000         if (reset)
3001                 req->vf_rst = 0x1;
3002
3003         return hclge_cmd_send(&hdev->hw, &desc, 1);
3004 }
3005
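/* Ask firmware to set or clear the "function reset in progress" state for
 * every VF vport and, when asserting the reset, inform each VF that is still
 * alive so it can stop IO before the PF reset proceeds.
 */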
3006 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3007 {
3008         int i;
3009
3010         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3011                 struct hclge_vport *vport = &hdev->vport[i];
3012                 int ret;
3013
3014                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3015                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3016                 if (ret) {
3017                         dev_err(&hdev->pdev->dev,
3018                                 "set vf(%d) rst failed %d!\n",
3019                                 vport->vport_id, ret);
3020                         return ret;
3021                 }
3022
3023                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3024                         continue;
3025
3026                 /* Inform VF to process the reset.
3027                  * hclge_inform_reset_assert_to_vf may fail if VF
3028                  * driver is not loaded.
3029                  */
3030                 ret = hclge_inform_reset_assert_to_vf(vport);
3031                 if (ret)
3032                         dev_warn(&hdev->pdev->dev,
3033                                  "inform reset to vf(%d) failed %d!\n",
3034                                  vport->vport_id, ret);
3035         }
3036
3037         return 0;
3038 }
3039
3040 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3041 {
3042         struct hclge_desc desc;
3043         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3044         int ret;
3045
3046         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3047         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3048         req->fun_reset_vfid = func_id;
3049
3050         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3051         if (ret)
3052                 dev_err(&hdev->pdev->dev,
3053                         "send function reset cmd fail, status =%d\n", ret);
3054
3055         return ret;
3056 }
3057
3058 static void hclge_do_reset(struct hclge_dev *hdev)
3059 {
3060         struct hnae3_handle *handle = &hdev->vport[0].nic;
3061         struct pci_dev *pdev = hdev->pdev;
3062         u32 val;
3063
3064         if (hclge_get_hw_reset_stat(handle)) {
3065                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3066                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3067                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3068                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3069                 return;
3070         }
3071
3072         switch (hdev->reset_type) {
3073         case HNAE3_GLOBAL_RESET:
3074                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3076                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077                 dev_info(&pdev->dev, "Global Reset requested\n");
3078                 break;
3079         case HNAE3_CORE_RESET:
3080                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3081                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3082                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3083                 dev_info(&pdev->dev, "Core Reset requested\n");
3084                 break;
3085         case HNAE3_FUNC_RESET:
3086                 dev_info(&pdev->dev, "PF Reset requested\n");
3087                 /* schedule again to check later */
3088                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3089                 hclge_reset_task_schedule(hdev);
3090                 break;
3091         case HNAE3_FLR_RESET:
3092                 dev_info(&pdev->dev, "FLR requested\n");
3093                 /* schedule again to check later */
3094                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3095                 hclge_reset_task_schedule(hdev);
3096                 break;
3097         default:
3098                 dev_warn(&pdev->dev,
3099                          "Unsupported reset type: %d\n", hdev->reset_type);
3100                 break;
3101         }
3102 }
3103
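/* Pick the highest-priority reset level pending in *addr (IMP > global >
 * core > func > FLR), clearing the bits of the lower-priority resets that the
 * chosen one will cover. An UNKNOWN reset request is first resolved by
 * decoding the MSI-X error cause.
 */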
3104 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3105                                                    unsigned long *addr)
3106 {
3107         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3108
3109         /* first, resolve any unknown reset type to the known type(s) */
3110         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3111         /* we will intentionally ignore any errors from this function,
3112          * as we will end up in *some* reset request in any case
3113                  */
3114                 hclge_handle_hw_msix_error(hdev, addr);
3115                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3116         /* We deferred the clearing of the error event which caused the
3117          * interrupt, since it was not possible to do that in
3118          * interrupt context (and this is the reason we introduced the
3119          * new UNKNOWN reset type). Now that the errors have been
3120          * handled and cleared in hardware, we can safely re-enable
3121          * interrupts. This is an exception to the norm.
3122                  */
3123                 hclge_enable_vector(&hdev->misc_vector, true);
3124         }
3125
3126         /* return the highest priority reset level amongst all */
3127         if (test_bit(HNAE3_IMP_RESET, addr)) {
3128                 rst_level = HNAE3_IMP_RESET;
3129                 clear_bit(HNAE3_IMP_RESET, addr);
3130                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3131                 clear_bit(HNAE3_CORE_RESET, addr);
3132                 clear_bit(HNAE3_FUNC_RESET, addr);
3133         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3134                 rst_level = HNAE3_GLOBAL_RESET;
3135                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3136                 clear_bit(HNAE3_CORE_RESET, addr);
3137                 clear_bit(HNAE3_FUNC_RESET, addr);
3138         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3139                 rst_level = HNAE3_CORE_RESET;
3140                 clear_bit(HNAE3_CORE_RESET, addr);
3141                 clear_bit(HNAE3_FUNC_RESET, addr);
3142         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3143                 rst_level = HNAE3_FUNC_RESET;
3144                 clear_bit(HNAE3_FUNC_RESET, addr);
3145         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3146                 rst_level = HNAE3_FLR_RESET;
3147                 clear_bit(HNAE3_FLR_RESET, addr);
3148         }
3149
3150         if (hdev->reset_type != HNAE3_NONE_RESET &&
3151             rst_level < hdev->reset_type)
3152                 return HNAE3_NONE_RESET;
3153
3154         return rst_level;
3155 }
3156
3157 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3158 {
3159         u32 clearval = 0;
3160
3161         switch (hdev->reset_type) {
3162         case HNAE3_IMP_RESET:
3163                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3164                 break;
3165         case HNAE3_GLOBAL_RESET:
3166                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3167                 break;
3168         case HNAE3_CORE_RESET:
3169                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3170                 break;
3171         default:
3172                 break;
3173         }
3174
3175         if (!clearval)
3176                 return;
3177
3178         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3179         hclge_enable_vector(&hdev->misc_vector, true);
3180 }
3181
3182 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3183 {
3184         int ret = 0;
3185
3186         switch (hdev->reset_type) {
3187         case HNAE3_FUNC_RESET:
3188                 /* fall through */
3189         case HNAE3_FLR_RESET:
3190                 ret = hclge_set_all_vf_rst(hdev, true);
3191                 break;
3192         default:
3193                 break;
3194         }
3195
3196         return ret;
3197 }
3198
3199 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3200 {
3201 #define HCLGE_RESET_SYNC_TIME 100
3202
3203         u32 reg_val;
3204         int ret = 0;
3205
3206         switch (hdev->reset_type) {
3207         case HNAE3_FUNC_RESET:
3208                 /* There is no mechanism for the PF to know if the VF has
3209                  * stopped IO for now, so just wait 100 ms for the VF to stop IO
3210                  */
3211                 msleep(HCLGE_RESET_SYNC_TIME);
3212                 ret = hclge_func_reset_cmd(hdev, 0);
3213                 if (ret) {
3214                         dev_err(&hdev->pdev->dev,
3215                                 "asserting function reset fail %d!\n", ret);
3216                         return ret;
3217                 }
3218
3219                 /* After performing a PF reset, it is not necessary to do any
3220                  * mailbox handling or send any command to firmware, because
3221                  * any mailbox handling or command to firmware is only valid
3222                  * after hclge_cmd_init is called.
3223                  */
3224                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225                 hdev->rst_stats.pf_rst_cnt++;
3226                 break;
3227         case HNAE3_FLR_RESET:
3228                 /* There is no mechanism for the PF to know if the VF has
3229                  * stopped IO for now, so just wait 100 ms for the VF to stop IO
3230                  */
3231                 msleep(HCLGE_RESET_SYNC_TIME);
3232                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3233                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3234                 hdev->rst_stats.flr_rst_cnt++;
3235                 break;
3236         case HNAE3_IMP_RESET:
3237                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3238                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3239                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3240                 break;
3241         default:
3242                 break;
3243         }
3244
3245         /* inform hardware that preparatory work is done */
3246         msleep(HCLGE_RESET_SYNC_TIME);
3247         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3248                         HCLGE_NIC_CMQ_ENABLE);
3249         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3250
3251         return ret;
3252 }
3253
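/* Decide how to react to a failed or timed-out reset: reschedule if another
 * reset is still pending, back off if an IMP reset has taken over, retry
 * (upgrading the reset level via the reset timer) while the failure count is
 * below MAX_RESET_FAIL_CNT, and give up otherwise. Returns true if the reset
 * task should be rescheduled.
 */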
3254 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3255 {
3256 #define MAX_RESET_FAIL_CNT 5
3257 #define RESET_UPGRADE_DELAY_SEC 10
3258
3259         if (hdev->reset_pending) {
3260                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3261                          hdev->reset_pending);
3262                 return true;
3263         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3264                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3265                     BIT(HCLGE_IMP_RESET_BIT))) {
3266                 dev_info(&hdev->pdev->dev,
3267                          "reset failed because IMP Reset is pending\n");
3268                 hclge_clear_reset_cause(hdev);
3269                 return false;
3270         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3271                 hdev->reset_fail_cnt++;
3272                 if (is_timeout) {
3273                         set_bit(hdev->reset_type, &hdev->reset_pending);
3274                         dev_info(&hdev->pdev->dev,
3275                                  "re-schedule to wait for hw reset done\n");
3276                         return true;
3277                 }
3278
3279                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3280                 hclge_clear_reset_cause(hdev);
3281                 mod_timer(&hdev->reset_timer,
3282                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3283
3284                 return false;
3285         }
3286
3287         hclge_clear_reset_cause(hdev);
3288         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3289         return false;
3290 }
3291
3292 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3293 {
3294         int ret = 0;
3295
3296         switch (hdev->reset_type) {
3297         case HNAE3_FUNC_RESET:
3298                 /* fall through */
3299         case HNAE3_FLR_RESET:
3300                 ret = hclge_set_all_vf_rst(hdev, false);
3301                 break;
3302         default:
3303                 break;
3304         }
3305
3306         return ret;
3307 }
3308
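/* The core reset sequence: notify the RoCE and NIC clients to go down, assert
 * the reset and wait for hardware to finish, re-initialize the ae device, and
 * finally bring the clients back up. Any failure is handed to
 * hclge_reset_err_handle(), which may reschedule the reset task.
 */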
3309 static void hclge_reset(struct hclge_dev *hdev)
3310 {
3311         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3312         bool is_timeout = false;
3313         int ret;
3314
3315         /* Initialize ae_dev reset status as well, in case enet layer wants to
3316          * know if device is undergoing reset
3317          */
3318         ae_dev->reset_type = hdev->reset_type;
3319         hdev->rst_stats.reset_cnt++;
3320         /* perform reset of the stack & ae device for a client */
3321         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3322         if (ret)
3323                 goto err_reset;
3324
3325         ret = hclge_reset_prepare_down(hdev);
3326         if (ret)
3327                 goto err_reset;
3328
3329         rtnl_lock();
3330         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3331         if (ret)
3332                 goto err_reset_lock;
3333
3334         rtnl_unlock();
3335
3336         ret = hclge_reset_prepare_wait(hdev);
3337         if (ret)
3338                 goto err_reset;
3339
3340         if (hclge_reset_wait(hdev)) {
3341                 is_timeout = true;
3342                 goto err_reset;
3343         }
3344
3345         hdev->rst_stats.hw_reset_done_cnt++;
3346
3347         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3348         if (ret)
3349                 goto err_reset;
3350
3351         rtnl_lock();
3352         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3353         if (ret)
3354                 goto err_reset_lock;
3355
3356         ret = hclge_reset_ae_dev(hdev->ae_dev);
3357         if (ret)
3358                 goto err_reset_lock;
3359
3360         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3361         if (ret)
3362                 goto err_reset_lock;
3363
3364         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3365         if (ret)
3366                 goto err_reset_lock;
3367
3368         hclge_clear_reset_cause(hdev);
3369
3370         ret = hclge_reset_prepare_up(hdev);
3371         if (ret)
3372                 goto err_reset_lock;
3373
3374         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3375         if (ret)
3376                 goto err_reset_lock;
3377
3378         rtnl_unlock();
3379
3380         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3381         if (ret)
3382                 goto err_reset;
3383
3384         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3385         if (ret)
3386                 goto err_reset;
3387
3388         hdev->last_reset_time = jiffies;
3389         hdev->reset_fail_cnt = 0;
3390         hdev->rst_stats.reset_done_cnt++;
3391         ae_dev->reset_type = HNAE3_NONE_RESET;
3392         del_timer(&hdev->reset_timer);
3393
3394         return;
3395
3396 err_reset_lock:
3397         rtnl_unlock();
3398 err_reset:
3399         if (hclge_reset_err_handle(hdev, is_timeout))
3400                 hclge_reset_task_schedule(hdev);
3401 }
3402
3403 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3404 {
3405         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3406         struct hclge_dev *hdev = ae_dev->priv;
3407
3408         /* We might end up getting called broadly because of the two cases below:
3409          * 1. A recoverable error was conveyed through APEI and the only way to
3410          *    restore normalcy is to reset.
3411          * 2. A new reset request from the stack due to a timeout.
3412          *
3413          * For the first case the error event might not have an ae handle available.
3414          * Check if this is a new reset request and we are not here just because
3415          * the last reset attempt did not succeed and the watchdog hit us again.
3416          * We know it is a new request if the last reset request did not occur
3417          * very recently (watchdog timer = 5*HZ, so let us check after a
3418          * sufficiently large time, say 4*5*HZ). In case of a new request we
3419          * reset the "reset level" to PF reset. If it is a repeat of the most
3420          * recent request then we want to make sure we throttle it. Therefore,
3421          * we will not allow it again within 3*HZ.
3422          */
3423         if (!handle)
3424                 handle = &hdev->vport[0].nic;
3425
3426         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3427                 return;
3428         else if (hdev->default_reset_request)
3429                 hdev->reset_level =
3430                         hclge_get_reset_level(hdev,
3431                                               &hdev->default_reset_request);
3432         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3433                 hdev->reset_level = HNAE3_FUNC_RESET;
3434
3435         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3436                  hdev->reset_level);
3437
3438         /* request reset & schedule reset task */
3439         set_bit(hdev->reset_level, &hdev->reset_request);
3440         hclge_reset_task_schedule(hdev);
3441
3442         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3443                 hdev->reset_level++;
3444 }
3445
3446 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3447                                         enum hnae3_reset_type rst_type)
3448 {
3449         struct hclge_dev *hdev = ae_dev->priv;
3450
3451         set_bit(rst_type, &hdev->default_reset_request);
3452 }
3453
3454 static void hclge_reset_timer(struct timer_list *t)
3455 {
3456         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3457
3458         dev_info(&hdev->pdev->dev,
3459                  "triggering global reset in reset timer\n");
3460         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3461         hclge_reset_event(hdev->pdev, NULL);
3462 }
3463
3464 static void hclge_reset_subtask(struct hclge_dev *hdev)
3465 {
3466         /* check if there is any ongoing reset in the hardware. This status can
3467          * be checked from reset_pending. If there is, then we need to wait for
3468          * hardware to complete the reset.
3469          *    a. If we are able to figure out in reasonable time that the
3470          *       hardware has fully reset, then we can proceed with the driver
3471          *       and client reset.
3472          *    b. else, we can come back later to check this status, so
3473          *       reschedule now.
3474          */
3475         hdev->last_reset_time = jiffies;
3476         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3477         if (hdev->reset_type != HNAE3_NONE_RESET)
3478                 hclge_reset(hdev);
3479
3480         /* check if we got any *new* reset requests to be honored */
3481         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3482         if (hdev->reset_type != HNAE3_NONE_RESET)
3483                 hclge_do_reset(hdev);
3484
3485         hdev->reset_type = HNAE3_NONE_RESET;
3486 }
3487
3488 static void hclge_reset_service_task(struct work_struct *work)
3489 {
3490         struct hclge_dev *hdev =
3491                 container_of(work, struct hclge_dev, rst_service_task);
3492
3493         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3494                 return;
3495
3496         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3497
3498         hclge_reset_subtask(hdev);
3499
3500         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3501 }
3502
3503 static void hclge_mailbox_service_task(struct work_struct *work)
3504 {
3505         struct hclge_dev *hdev =
3506                 container_of(work, struct hclge_dev, mbx_service_task);
3507
3508         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3509                 return;
3510
3511         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3512
3513         hclge_mbx_handler(hdev);
3514
3515         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3516 }
3517
3518 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3519 {
3520         int i;
3521
3522         /* start from vport 1, since vport 0 (the PF) is always alive */
3523         for (i = 1; i < hdev->num_alloc_vport; i++) {
3524                 struct hclge_vport *vport = &hdev->vport[i];
3525
3526                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3527                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3528
3529                 /* If the VF is not alive, restore its MPS to the default value */
3530                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3531                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3532         }
3533 }
3534
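/* Periodic service task: refreshes MAC/TQP statistics, port and link status
 * and VF liveness, and expires aged aRFS flow director rules.
 */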
3535 static void hclge_service_task(struct work_struct *work)
3536 {
3537         struct hclge_dev *hdev =
3538                 container_of(work, struct hclge_dev, service_task);
3539
3540         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3541                 hclge_update_stats_for_all(hdev);
3542                 hdev->hw_stats.stats_timer = 0;
3543         }
3544
3545         hclge_update_port_info(hdev);
3546         hclge_update_link_status(hdev);
3547         hclge_update_vport_alive(hdev);
3548         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3549                 hclge_rfs_filter_expire(hdev);
3550                 hdev->fd_arfs_expire_timer = 0;
3551         }
3552         hclge_service_complete(hdev);
3553 }
3554
3555 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3556 {
3557         /* VF handle has no client */
3558         if (!handle->client)
3559                 return container_of(handle, struct hclge_vport, nic);
3560         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3561                 return container_of(handle, struct hclge_vport, roce);
3562         else
3563                 return container_of(handle, struct hclge_vport, nic);
3564 }
3565
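/* Allocate up to vector_num unused MSI-X vectors (vector 0 is reserved for
 * the misc interrupt) to the requesting vport, filling in the IRQ number and
 * the per-vport interrupt control address for each one. Returns the number of
 * vectors actually allocated.
 */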
3566 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3567                             struct hnae3_vector_info *vector_info)
3568 {
3569         struct hclge_vport *vport = hclge_get_vport(handle);
3570         struct hnae3_vector_info *vector = vector_info;
3571         struct hclge_dev *hdev = vport->back;
3572         int alloc = 0;
3573         int i, j;
3574
3575         vector_num = min(hdev->num_msi_left, vector_num);
3576
3577         for (j = 0; j < vector_num; j++) {
3578                 for (i = 1; i < hdev->num_msi; i++) {
3579                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3580                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3581                                 vector->io_addr = hdev->hw.io_base +
3582                                         HCLGE_VECTOR_REG_BASE +
3583                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3584                                         vport->vport_id *
3585                                         HCLGE_VECTOR_VF_OFFSET;
3586                                 hdev->vector_status[i] = vport->vport_id;
3587                                 hdev->vector_irq[i] = vector->vector;
3588
3589                                 vector++;
3590                                 alloc++;
3591
3592                                 break;
3593                         }
3594                 }
3595         }
3596         hdev->num_msi_left -= alloc;
3597         hdev->num_msi_used += alloc;
3598
3599         return alloc;
3600 }
3601
3602 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3603 {
3604         int i;
3605
3606         for (i = 0; i < hdev->num_msi; i++)
3607                 if (vector == hdev->vector_irq[i])
3608                         return i;
3609
3610         return -EINVAL;
3611 }
3612
3613 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3614 {
3615         struct hclge_vport *vport = hclge_get_vport(handle);
3616         struct hclge_dev *hdev = vport->back;
3617         int vector_id;
3618
3619         vector_id = hclge_get_vector_index(hdev, vector);
3620         if (vector_id < 0) {
3621                 dev_err(&hdev->pdev->dev,
3622                         "Get vector index fail. vector_id =%d\n", vector_id);
3623                 return vector_id;
3624         }
3625
3626         hclge_free_vector(hdev, vector_id);
3627
3628         return 0;
3629 }
3630
3631 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3632 {
3633         return HCLGE_RSS_KEY_SIZE;
3634 }
3635
3636 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3637 {
3638         return HCLGE_RSS_IND_TBL_SIZE;
3639 }
3640
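/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written HCLGE_RSS_HASH_KEY_NUM bytes at a time across three command
 * descriptors, with the last chunk holding the remainder of the
 * HCLGE_RSS_KEY_SIZE byte key.
 */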
3641 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3642                                   const u8 hfunc, const u8 *key)
3643 {
3644         struct hclge_rss_config_cmd *req;
3645         struct hclge_desc desc;
3646         int key_offset;
3647         int key_size;
3648         int ret;
3649
3650         req = (struct hclge_rss_config_cmd *)desc.data;
3651
3652         for (key_offset = 0; key_offset < 3; key_offset++) {
3653                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3654                                            false);
3655
3656                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3657                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3658
3659                 if (key_offset == 2)
3660                         key_size =
3661                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3662                 else
3663                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3664
3665                 memcpy(req->hash_key,
3666                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3667
3668                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3669                 if (ret) {
3670                         dev_err(&hdev->pdev->dev,
3671                                 "Configure RSS config fail, status = %d\n",
3672                                 ret);
3673                         return ret;
3674                 }
3675         }
3676         return 0;
3677 }
3678
3679 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3680 {
3681         struct hclge_rss_indirection_table_cmd *req;
3682         struct hclge_desc desc;
3683         int i, j;
3684         int ret;
3685
3686         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3687
3688         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3689                 hclge_cmd_setup_basic_desc
3690                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3691
3692                 req->start_table_index =
3693                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3694                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3695
3696                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3697                         req->rss_result[j] =
3698                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3699
3700                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3701                 if (ret) {
3702                         dev_err(&hdev->pdev->dev,
3703                                 "Configure rss indir table fail,status = %d\n",
3704                                 ret);
3705                         return ret;
3706                 }
3707         }
3708         return 0;
3709 }
3710
3711 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3712                                  u16 *tc_size, u16 *tc_offset)
3713 {
3714         struct hclge_rss_tc_mode_cmd *req;
3715         struct hclge_desc desc;
3716         int ret;
3717         int i;
3718
3719         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3720         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3721
3722         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3723                 u16 mode = 0;
3724
3725                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3726                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3727                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3728                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3729                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3730
3731                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3732         }
3733
3734         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3735         if (ret)
3736                 dev_err(&hdev->pdev->dev,
3737                         "Configure rss tc mode fail, status = %d\n", ret);
3738
3739         return ret;
3740 }
3741
3742 static void hclge_get_rss_type(struct hclge_vport *vport)
3743 {
3744         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3745             vport->rss_tuple_sets.ipv4_udp_en ||
3746             vport->rss_tuple_sets.ipv4_sctp_en ||
3747             vport->rss_tuple_sets.ipv6_tcp_en ||
3748             vport->rss_tuple_sets.ipv6_udp_en ||
3749             vport->rss_tuple_sets.ipv6_sctp_en)
3750                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3751         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3752                  vport->rss_tuple_sets.ipv6_fragment_en)
3753                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3754         else
3755                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3756 }
3757
3758 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3759 {
3760         struct hclge_rss_input_tuple_cmd *req;
3761         struct hclge_desc desc;
3762         int ret;
3763
3764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3765
3766         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3767
3768         /* Get the tuple cfg from pf */
3769         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3770         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3771         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3772         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3773         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3774         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3775         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3776         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3777         hclge_get_rss_type(&hdev->vport[0]);
3778         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3779         if (ret)
3780                 dev_err(&hdev->pdev->dev,
3781                         "Configure rss input fail, status = %d\n", ret);
3782         return ret;
3783 }
3784
3785 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3786                          u8 *key, u8 *hfunc)
3787 {
3788         struct hclge_vport *vport = hclge_get_vport(handle);
3789         int i;
3790
3791         /* Get hash algorithm */
3792         if (hfunc) {
3793                 switch (vport->rss_algo) {
3794                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3795                         *hfunc = ETH_RSS_HASH_TOP;
3796                         break;
3797                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3798                         *hfunc = ETH_RSS_HASH_XOR;
3799                         break;
3800                 default:
3801                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3802                         break;
3803                 }
3804         }
3805
3806         /* Get the RSS Key required by the user */
3807         if (key)
3808                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3809
3810         /* Get indirect table */
3811         if (indir)
3812                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3813                         indir[i] =  vport->rss_indirection_tbl[i];
3814
3815         return 0;
3816 }
3817
3818 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3819                          const  u8 *key, const  u8 hfunc)
3820 {
3821         struct hclge_vport *vport = hclge_get_vport(handle);
3822         struct hclge_dev *hdev = vport->back;
3823         u8 hash_algo;
3824         int ret, i;
3825
3826         /* Set the RSS Hash Key if specified by the user */
3827         if (key) {
3828                 switch (hfunc) {
3829                 case ETH_RSS_HASH_TOP:
3830                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3831                         break;
3832                 case ETH_RSS_HASH_XOR:
3833                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3834                         break;
3835                 case ETH_RSS_HASH_NO_CHANGE:
3836                         hash_algo = vport->rss_algo;
3837                         break;
3838                 default:
3839                         return -EINVAL;
3840                 }
3841
3842                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3843                 if (ret)
3844                         return ret;
3845
3846                 /* Update the shadow RSS key with the user specified key */
3847                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3848                 vport->rss_algo = hash_algo;
3849         }
3850
3851         /* Update the shadow RSS table with user specified qids */
3852         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3853                 vport->rss_indirection_tbl[i] = indir[i];
3854
3855         /* Update the hardware */
3856         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3857 }
3858
3859 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3860 {
3861         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3862
3863         if (nfc->data & RXH_L4_B_2_3)
3864                 hash_sets |= HCLGE_D_PORT_BIT;
3865         else
3866                 hash_sets &= ~HCLGE_D_PORT_BIT;
3867
3868         if (nfc->data & RXH_IP_SRC)
3869                 hash_sets |= HCLGE_S_IP_BIT;
3870         else
3871                 hash_sets &= ~HCLGE_S_IP_BIT;
3872
3873         if (nfc->data & RXH_IP_DST)
3874                 hash_sets |= HCLGE_D_IP_BIT;
3875         else
3876                 hash_sets &= ~HCLGE_D_IP_BIT;
3877
3878         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3879                 hash_sets |= HCLGE_V_TAG_BIT;
3880
3881         return hash_sets;
3882 }
3883
3884 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3885                                struct ethtool_rxnfc *nfc)
3886 {
3887         struct hclge_vport *vport = hclge_get_vport(handle);
3888         struct hclge_dev *hdev = vport->back;
3889         struct hclge_rss_input_tuple_cmd *req;
3890         struct hclge_desc desc;
3891         u8 tuple_sets;
3892         int ret;
3893
3894         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3895                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3896                 return -EINVAL;
3897
3898         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3899         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3900
3901         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3902         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3903         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3904         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3905         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3906         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3907         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3908         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3909
3910         tuple_sets = hclge_get_rss_hash_bits(nfc);
3911         switch (nfc->flow_type) {
3912         case TCP_V4_FLOW:
3913                 req->ipv4_tcp_en = tuple_sets;
3914                 break;
3915         case TCP_V6_FLOW:
3916                 req->ipv6_tcp_en = tuple_sets;
3917                 break;
3918         case UDP_V4_FLOW:
3919                 req->ipv4_udp_en = tuple_sets;
3920                 break;
3921         case UDP_V6_FLOW:
3922                 req->ipv6_udp_en = tuple_sets;
3923                 break;
3924         case SCTP_V4_FLOW:
3925                 req->ipv4_sctp_en = tuple_sets;
3926                 break;
3927         case SCTP_V6_FLOW:
3928                 if ((nfc->data & RXH_L4_B_0_1) ||
3929                     (nfc->data & RXH_L4_B_2_3))
3930                         return -EINVAL;
3931
3932                 req->ipv6_sctp_en = tuple_sets;
3933                 break;
3934         case IPV4_FLOW:
3935                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3936                 break;
3937         case IPV6_FLOW:
3938                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3939                 break;
3940         default:
3941                 return -EINVAL;
3942         }
3943
3944         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3945         if (ret) {
3946                 dev_err(&hdev->pdev->dev,
3947                         "Set rss tuple fail, status = %d\n", ret);
3948                 return ret;
3949         }
3950
3951         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3952         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3953         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3954         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3955         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3956         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3957         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3958         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3959         hclge_get_rss_type(vport);
3960         return 0;
3961 }
3962
3963 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3964                                struct ethtool_rxnfc *nfc)
3965 {
3966         struct hclge_vport *vport = hclge_get_vport(handle);
3967         u8 tuple_sets;
3968
3969         nfc->data = 0;
3970
3971         switch (nfc->flow_type) {
3972         case TCP_V4_FLOW:
3973                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3974                 break;
3975         case UDP_V4_FLOW:
3976                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3977                 break;
3978         case TCP_V6_FLOW:
3979                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3980                 break;
3981         case UDP_V6_FLOW:
3982                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3983                 break;
3984         case SCTP_V4_FLOW:
3985                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3986                 break;
3987         case SCTP_V6_FLOW:
3988                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3989                 break;
3990         case IPV4_FLOW:
3991         case IPV6_FLOW:
3992                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3993                 break;
3994         default:
3995                 return -EINVAL;
3996         }
3997
3998         if (!tuple_sets)
3999                 return 0;
4000
4001         if (tuple_sets & HCLGE_D_PORT_BIT)
4002                 nfc->data |= RXH_L4_B_2_3;
4003         if (tuple_sets & HCLGE_S_PORT_BIT)
4004                 nfc->data |= RXH_L4_B_0_1;
4005         if (tuple_sets & HCLGE_D_IP_BIT)
4006                 nfc->data |= RXH_IP_DST;
4007         if (tuple_sets & HCLGE_S_IP_BIT)
4008                 nfc->data |= RXH_IP_SRC;
4009
4010         return 0;
4011 }
4012
4013 static int hclge_get_tc_size(struct hnae3_handle *handle)
4014 {
4015         struct hclge_vport *vport = hclge_get_vport(handle);
4016         struct hclge_dev *hdev = vport->back;
4017
4018         return hdev->rss_size_max;
4019 }
4020
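/* Apply the shadow RSS configuration kept in vport[0] (indirection table,
 * hash key and algorithm, input tuples) to hardware, then program the per-TC
 * RSS mode derived from rss_size and hw_tc_map.
 */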
4021 int hclge_rss_init_hw(struct hclge_dev *hdev)
4022 {
4023         struct hclge_vport *vport = hdev->vport;
4024         u8 *rss_indir = vport[0].rss_indirection_tbl;
4025         u16 rss_size = vport[0].alloc_rss_size;
4026         u8 *key = vport[0].rss_hash_key;
4027         u8 hfunc = vport[0].rss_algo;
4028         u16 tc_offset[HCLGE_MAX_TC_NUM];
4029         u16 tc_valid[HCLGE_MAX_TC_NUM];
4030         u16 tc_size[HCLGE_MAX_TC_NUM];
4031         u16 roundup_size;
4032         int i, ret;
4033
4034         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4035         if (ret)
4036                 return ret;
4037
4038         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4039         if (ret)
4040                 return ret;
4041
4042         ret = hclge_set_rss_input_tuple(hdev);
4043         if (ret)
4044                 return ret;
4045
4046         /* Each TC has the same queue size, and the tc_size set to hardware is
4047          * the log2 of the roundup power of two of rss_size; the actual queue
4048          * size is limited by the indirection table.
4049          */
4050         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4051                 dev_err(&hdev->pdev->dev,
4052                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4053                         rss_size);
4054                 return -EINVAL;
4055         }
4056
4057         roundup_size = roundup_pow_of_two(rss_size);
4058         roundup_size = ilog2(roundup_size);
4059
4060         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4061                 tc_valid[i] = 0;
4062
4063                 if (!(hdev->hw_tc_map & BIT(i)))
4064                         continue;
4065
4066                 tc_valid[i] = 1;
4067                 tc_size[i] = roundup_size;
4068                 tc_offset[i] = rss_size * i;
4069         }
4070
4071         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4072 }
4073
4074 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4075 {
4076         struct hclge_vport *vport = hdev->vport;
4077         int i, j;
4078
4079         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4080                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4081                         vport[j].rss_indirection_tbl[i] =
4082                                 i % vport[j].alloc_rss_size;
4083         }
4084 }
4085
4086 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4087 {
4088         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4089         struct hclge_vport *vport = hdev->vport;
4090
4091         if (hdev->pdev->revision >= 0x21)
4092                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4093
4094         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4095                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4096                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4097                 vport[i].rss_tuple_sets.ipv4_udp_en =
4098                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4099                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4100                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4101                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4102                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4103                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4104                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4105                 vport[i].rss_tuple_sets.ipv6_udp_en =
4106                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4107                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4108                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4109                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4110                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4111
4112                 vport[i].rss_algo = rss_algo;
4113
4114                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4115                        HCLGE_RSS_KEY_SIZE);
4116         }
4117
4118         hclge_rss_indir_init_cfg(hdev);
4119 }
4120
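/* Map (en == true) or unmap (en == false) every ring in @ring_chain to the
 * given vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per
 * command descriptor. As an illustrative example (not taken from this file),
 * a caller that has already resolved a vector index might do:
 *
 *      ret = hclge_bind_ring_with_vector(vport, vector_id, true, chain);
 *      if (ret)
 *              return ret;
 */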
4121 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4122                                 int vector_id, bool en,
4123                                 struct hnae3_ring_chain_node *ring_chain)
4124 {
4125         struct hclge_dev *hdev = vport->back;
4126         struct hnae3_ring_chain_node *node;
4127         struct hclge_desc desc;
4128         struct hclge_ctrl_vector_chain_cmd *req
4129                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4130         enum hclge_cmd_status status;
4131         enum hclge_opcode_type op;
4132         u16 tqp_type_and_id;
4133         int i;
4134
4135         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4136         hclge_cmd_setup_basic_desc(&desc, op, false);
4137         req->int_vector_id = vector_id;
4138
4139         i = 0;
4140         for (node = ring_chain; node; node = node->next) {
4141                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4142                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4143                                 HCLGE_INT_TYPE_S,
4144                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4145                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4146                                 HCLGE_TQP_ID_S, node->tqp_index);
4147                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4148                                 HCLGE_INT_GL_IDX_S,
4149                                 hnae3_get_field(node->int_gl_idx,
4150                                                 HNAE3_RING_GL_IDX_M,
4151                                                 HNAE3_RING_GL_IDX_S));
4152                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4153                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4154                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4155                         req->vfid = vport->vport_id;
4156
4157                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4158                         if (status) {
4159                                 dev_err(&hdev->pdev->dev,
4160                                         "Map TQP fail, status is %d.\n",
4161                                         status);
4162                                 return -EIO;
4163                         }
4164                         i = 0;
4165
4166                         hclge_cmd_setup_basic_desc(&desc,
4167                                                    op,
4168                                                    false);
4169                         req->int_vector_id = vector_id;
4170                 }
4171         }
4172
4173         if (i > 0) {
4174                 req->int_cause_num = i;
4175                 req->vfid = vport->vport_id;
4176                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4177                 if (status) {
4178                         dev_err(&hdev->pdev->dev,
4179                                 "Map TQP fail, status is %d.\n", status);
4180                         return -EIO;
4181                 }
4182         }
4183
4184         return 0;
4185 }
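/* hclge_bind_ring_with_vector() above packs the ring chain into commands of
 * at most HCLGE_VECTOR_ELEMENTS_PER_CMD entries and flushes a partially
 * filled descriptor at the end. A stripped-down sketch of that batching
 * pattern (hypothetical helpers, shown only to illustrate the flow):
 *
 *	i = 0;
 *	for (node = chain; node; node = node->next) {
 *		fill_entry(&req->entry[i], node);
 *		if (++i >= MAX_PER_CMD) {
 *			send_cmd(req, i);	// full batch
 *			i = 0;
 *			reinit_cmd(req);	// start a fresh descriptor
 *		}
 *	}
 *	if (i > 0)
 *		send_cmd(req, i);		// trailing partial batch
 */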
4186
4187 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4188                                     int vector,
4189                                     struct hnae3_ring_chain_node *ring_chain)
4190 {
4191         struct hclge_vport *vport = hclge_get_vport(handle);
4192         struct hclge_dev *hdev = vport->back;
4193         int vector_id;
4194
4195         vector_id = hclge_get_vector_index(hdev, vector);
4196         if (vector_id < 0) {
4197                 dev_err(&hdev->pdev->dev,
4198                         "Get vector index fail. vector_id =%d\n", vector_id);
4199                 return vector_id;
4200         }
4201
4202         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4203 }
4204
4205 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4206                                        int vector,
4207                                        struct hnae3_ring_chain_node *ring_chain)
4208 {
4209         struct hclge_vport *vport = hclge_get_vport(handle);
4210         struct hclge_dev *hdev = vport->back;
4211         int vector_id, ret;
4212
4213         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4214                 return 0;
4215
4216         vector_id = hclge_get_vector_index(hdev, vector);
4217         if (vector_id < 0) {
4218                 dev_err(&handle->pdev->dev,
4219                         "Get vector index fail. ret =%d\n", vector_id);
4220                 return vector_id;
4221         }
4222
4223         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4224         if (ret)
4225                 dev_err(&handle->pdev->dev,
4226                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4227                         vector_id,
4228                         ret);
4229
4230         return ret;
4231 }
4232
4233 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4234                                struct hclge_promisc_param *param)
4235 {
4236         struct hclge_promisc_cfg_cmd *req;
4237         struct hclge_desc desc;
4238         int ret;
4239
4240         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4241
4242         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4243         req->vf_id = param->vf_id;
4244
4245         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4246          * pdev revision 0x20; newer revisions support them. Setting these
4247          * two fields does not cause the firmware to return an error when the
4248          * driver sends the command on revision 0x20.
4249          */
4250         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4251                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4252
4253         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4254         if (ret)
4255                 dev_err(&hdev->pdev->dev,
4256                         "Set promisc mode fail, status is %d.\n", ret);
4257
4258         return ret;
4259 }
4260
4261 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4262                               bool en_mc, bool en_bc, int vport_id)
4263 {
4264         if (!param)
4265                 return;
4266
4267         memset(param, 0, sizeof(struct hclge_promisc_param));
4268         if (en_uc)
4269                 param->enable = HCLGE_PROMISC_EN_UC;
4270         if (en_mc)
4271                 param->enable |= HCLGE_PROMISC_EN_MC;
4272         if (en_bc)
4273                 param->enable |= HCLGE_PROMISC_EN_BC;
4274         param->vf_id = vport_id;
4275 }
4276
4277 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4278                                   bool en_mc_pmc)
4279 {
4280         struct hclge_vport *vport = hclge_get_vport(handle);
4281         struct hclge_dev *hdev = vport->back;
4282         struct hclge_promisc_param param;
4283         bool en_bc_pmc = true;
4284
4285         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4286          * is always bypassed. So broadcast promisc should be disabled until
4287          * the user enables promisc mode.
4288          */
4289         if (handle->pdev->revision == 0x20)
4290                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4291
4292         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4293                                  vport->vport_id);
4294         return hclge_cmd_set_promisc_mode(hdev, &param);
4295 }
4296
4297 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4298 {
4299         struct hclge_get_fd_mode_cmd *req;
4300         struct hclge_desc desc;
4301         int ret;
4302
4303         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4304
4305         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4306
4307         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4308         if (ret) {
4309                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4310                 return ret;
4311         }
4312
4313         *fd_mode = req->mode;
4314
4315         return ret;
4316 }
4317
4318 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4319                                    u32 *stage1_entry_num,
4320                                    u32 *stage2_entry_num,
4321                                    u16 *stage1_counter_num,
4322                                    u16 *stage2_counter_num)
4323 {
4324         struct hclge_get_fd_allocation_cmd *req;
4325         struct hclge_desc desc;
4326         int ret;
4327
4328         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4329
4330         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4331
4332         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4333         if (ret) {
4334                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4335                         ret);
4336                 return ret;
4337         }
4338
4339         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4340         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4341         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4342         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4343
4344         return ret;
4345 }
4346
4347 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4348 {
4349         struct hclge_set_fd_key_config_cmd *req;
4350         struct hclge_fd_key_cfg *stage;
4351         struct hclge_desc desc;
4352         int ret;
4353
4354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4355
4356         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4357         stage = &hdev->fd_cfg.key_cfg[stage_num];
4358         req->stage = stage_num;
4359         req->key_select = stage->key_sel;
4360         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4361         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4362         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4363         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4364         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4365         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4366
4367         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4368         if (ret)
4369                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4370
4371         return ret;
4372 }
4373
4374 static int hclge_init_fd_config(struct hclge_dev *hdev)
4375 {
4376 #define LOW_2_WORDS             0x03
4377         struct hclge_fd_key_cfg *key_cfg;
4378         int ret;
4379
4380         if (!hnae3_dev_fd_supported(hdev))
4381                 return 0;
4382
4383         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4384         if (ret)
4385                 return ret;
4386
4387         switch (hdev->fd_cfg.fd_mode) {
4388         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4389                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4390                 break;
4391         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4392                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4393                 break;
4394         default:
4395                 dev_err(&hdev->pdev->dev,
4396                         "Unsupported flow director mode %d\n",
4397                         hdev->fd_cfg.fd_mode);
4398                 return -EOPNOTSUPP;
4399         }
4400
4401         hdev->fd_cfg.proto_support =
4402                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4403                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4404         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4405         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4406         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4407         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4408         key_cfg->outer_sipv6_word_en = 0;
4409         key_cfg->outer_dipv6_word_en = 0;
4410
4411         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4412                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4413                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4414                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4415
4416         /* If the max 400-bit key is used, tuples for ether type can also be supported */
4417         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4418                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4419                 key_cfg->tuple_active |=
4420                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4421         }
4422
4423         /* roce_type is used to filter RoCE frames
4424          * dst_vport is used to restrict the rule to its destination vport
4425          */
4426         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4427
4428         ret = hclge_get_fd_allocation(hdev,
4429                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4430                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4431                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4432                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4433         if (ret)
4434                 return ret;
4435
4436         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4437 }
4438
4439 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4440                                 int loc, u8 *key, bool is_add)
4441 {
4442         struct hclge_fd_tcam_config_1_cmd *req1;
4443         struct hclge_fd_tcam_config_2_cmd *req2;
4444         struct hclge_fd_tcam_config_3_cmd *req3;
4445         struct hclge_desc desc[3];
4446         int ret;
4447
4448         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4449         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4450         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4451         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4452         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4453
4454         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4455         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4456         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4457
4458         req1->stage = stage;
4459         req1->xy_sel = sel_x ? 1 : 0;
4460         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4461         req1->index = cpu_to_le32(loc);
4462         req1->entry_vld = sel_x ? is_add : 0;
4463
4464         if (key) {
4465                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4466                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4467                        sizeof(req2->tcam_data));
4468                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4469                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4470         }
4471
4472         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4473         if (ret)
4474                 dev_err(&hdev->pdev->dev,
4475                         "config tcam key fail, ret=%d\n",
4476                         ret);
4477
4478         return ret;
4479 }
4480
4481 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4482                               struct hclge_fd_ad_data *action)
4483 {
4484         struct hclge_fd_ad_config_cmd *req;
4485         struct hclge_desc desc;
4486         u64 ad_data = 0;
4487         int ret;
4488
4489         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4490
4491         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4492         req->index = cpu_to_le32(loc);
4493         req->stage = stage;
4494
4495         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4496                       action->write_rule_id_to_bd);
4497         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4498                         action->rule_id);
4499         ad_data <<= 32;
4500         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4501         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4502                       action->forward_to_direct_queue);
4503         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4504                         action->queue_id);
4505         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4506         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4507                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4508         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4509         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4510                         action->counter_id);
4511
4512         req->ad_data = cpu_to_le64(ad_data);
4513         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4514         if (ret)
4515                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4516
4517         return ret;
4518 }
4519
4520 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4521                                    struct hclge_fd_rule *rule)
4522 {
4523         u16 tmp_x_s, tmp_y_s;
4524         u32 tmp_x_l, tmp_y_l;
4525         int i;
4526
4527         if (rule->unused_tuple & tuple_bit)
4528                 return true;
4529
4530         switch (tuple_bit) {
4531         case 0:
4532                 return false;
4533         case BIT(INNER_DST_MAC):
4534                 for (i = 0; i < 6; i++) {
4535                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4536                                rule->tuples_mask.dst_mac[i]);
4537                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4538                                rule->tuples_mask.dst_mac[i]);
4539                 }
4540
4541                 return true;
4542         case BIT(INNER_SRC_MAC):
4543                 for (i = 0; i < 6; i++) {
4544                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4545                                rule->tuples_mask.src_mac[i]);
4546                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4547                                rule->tuples_mask.src_mac[i]);
4548                 }
4549
4550                 return true;
4551         case BIT(INNER_VLAN_TAG_FST):
4552                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4553                        rule->tuples_mask.vlan_tag1);
4554                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4555                        rule->tuples_mask.vlan_tag1);
4556                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4557                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4558
4559                 return true;
4560         case BIT(INNER_ETH_TYPE):
4561                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4562                        rule->tuples_mask.ether_proto);
4563                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4564                        rule->tuples_mask.ether_proto);
4565                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4566                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4567
4568                 return true;
4569         case BIT(INNER_IP_TOS):
4570                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4571                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4572
4573                 return true;
4574         case BIT(INNER_IP_PROTO):
4575                 calc_x(*key_x, rule->tuples.ip_proto,
4576                        rule->tuples_mask.ip_proto);
4577                 calc_y(*key_y, rule->tuples.ip_proto,
4578                        rule->tuples_mask.ip_proto);
4579
4580                 return true;
4581         case BIT(INNER_SRC_IP):
4582                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4583                        rule->tuples_mask.src_ip[3]);
4584                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4585                        rule->tuples_mask.src_ip[3]);
4586                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4587                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4588
4589                 return true;
4590         case BIT(INNER_DST_IP):
4591                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4592                        rule->tuples_mask.dst_ip[3]);
4593                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4594                        rule->tuples_mask.dst_ip[3]);
4595                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4596                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4597
4598                 return true;
4599         case BIT(INNER_SRC_PORT):
4600                 calc_x(tmp_x_s, rule->tuples.src_port,
4601                        rule->tuples_mask.src_port);
4602                 calc_y(tmp_y_s, rule->tuples.src_port,
4603                        rule->tuples_mask.src_port);
4604                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4605                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4606
4607                 return true;
4608         case BIT(INNER_DST_PORT):
4609                 calc_x(tmp_x_s, rule->tuples.dst_port,
4610                        rule->tuples_mask.dst_port);
4611                 calc_y(tmp_y_s, rule->tuples.dst_port,
4612                        rule->tuples_mask.dst_port);
4613                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4614                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4615
4616                 return true;
4617         default:
4618                 return false;
4619         }
4620 }
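/* calc_x()/calc_y() (defined earlier in this file) turn each value/mask pair
 * above into the TCAM x/y key format consumed by hclge_fd_tcam_config().
 * Under the usual TCAM convention, assumed here purely for illustration:
 *
 *	x = ~value & mask;
 *	y =  value & mask;
 *
 * a masked-out bit (mask bit 0) gives x = y = 0, i.e. "don't care", while a
 * masked-in bit encodes the exact value to match. For example, value 0xA5
 * with mask 0xF0 yields x = 0x50 and y = 0xA0, matching only the high nibble.
 */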
4621
4622 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4623                                  u8 vf_id, u8 network_port_id)
4624 {
4625         u32 port_number = 0;
4626
4627         if (port_type == HOST_PORT) {
4628                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4629                                 pf_id);
4630                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4631                                 vf_id);
4632                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4633         } else {
4634                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4635                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4636                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4637         }
4638
4639         return port_number;
4640 }
4641
4642 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4643                                        __le32 *key_x, __le32 *key_y,
4644                                        struct hclge_fd_rule *rule)
4645 {
4646         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4647         u8 cur_pos = 0, tuple_size, shift_bits;
4648         int i;
4649
4650         for (i = 0; i < MAX_META_DATA; i++) {
4651                 tuple_size = meta_data_key_info[i].key_length;
4652                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4653
4654                 switch (tuple_bit) {
4655                 case BIT(ROCE_TYPE):
4656                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4657                         cur_pos += tuple_size;
4658                         break;
4659                 case BIT(DST_VPORT):
4660                         port_number = hclge_get_port_number(HOST_PORT, 0,
4661                                                             rule->vf_id, 0);
4662                         hnae3_set_field(meta_data,
4663                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4664                                         cur_pos, port_number);
4665                         cur_pos += tuple_size;
4666                         break;
4667                 default:
4668                         break;
4669                 }
4670         }
4671
4672         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4673         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4674         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4675
4676         *key_x = cpu_to_le32(tmp_x << shift_bits);
4677         *key_y = cpu_to_le32(tmp_y << shift_bits);
4678 }
4679
4680 /* A complete key is made up of a meta data key and a tuple key.
4681  * The meta data key is stored in the MSB region and the tuple key in the
4682  * LSB region; unused bits are filled with 0.
4683  */
4684 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4685                             struct hclge_fd_rule *rule)
4686 {
4687         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4688         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4689         u8 *cur_key_x, *cur_key_y;
4690         int i, ret, tuple_size;
4691         u8 meta_data_region;
4692
4693         memset(key_x, 0, sizeof(key_x));
4694         memset(key_y, 0, sizeof(key_y));
4695         cur_key_x = key_x;
4696         cur_key_y = key_y;
4697
4698         for (i = 0; i < MAX_TUPLE; i++) {
4699                 bool tuple_valid;
4700                 u32 check_tuple;
4701
4702                 tuple_size = tuple_key_info[i].key_length / 8;
4703                 check_tuple = key_cfg->tuple_active & BIT(i);
4704
4705                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4706                                                      cur_key_y, rule);
4707                 if (tuple_valid) {
4708                         cur_key_x += tuple_size;
4709                         cur_key_y += tuple_size;
4710                 }
4711         }
4712
4713         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4714                         MAX_META_DATA_LENGTH / 8;
4715
4716         hclge_fd_convert_meta_data(key_cfg,
4717                                    (__le32 *)(key_x + meta_data_region),
4718                                    (__le32 *)(key_y + meta_data_region),
4719                                    rule);
4720
4721         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4722                                    true);
4723         if (ret) {
4724                 dev_err(&hdev->pdev->dev,
4725                         "fd key_y config fail, loc=%d, ret=%d\n",
4726                         rule->location, ret);
4727                 return ret;
4728         }
4729
4730         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4731                                    true);
4732         if (ret)
4733                 dev_err(&hdev->pdev->dev,
4734                         "fd key_x config fail, loc=%d, ret=%d\n",
4735                         rule->location, ret);
4736         return ret;
4737 }
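/* Rough layout of the key built by hclge_config_key() (sketch; the total
 * width depends on fd_mode):
 *
 *	MSB                                                        LSB
 *	+---------------------+-------------------------------------+
 *	| meta data key       | tuple key: tuples enabled in        |
 *	| (MAX_META_DATA_     | tuple_active packed back to back;   |
 *	|  LENGTH bits)       | tuples the rule does not use are    |
 *	|                     | left as zero (don't care)           |
 *	+---------------------+-------------------------------------+
 *	|<------------ max_key_length (200 or 400 bits) ----------->|
 *
 * meta_data_region above is simply the byte offset of that MSB region,
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8.
 */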
4738
4739 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4740                                struct hclge_fd_rule *rule)
4741 {
4742         struct hclge_fd_ad_data ad_data;
4743
4744         ad_data.ad_id = rule->location;
4745
4746         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4747                 ad_data.drop_packet = true;
4748                 ad_data.forward_to_direct_queue = false;
4749                 ad_data.queue_id = 0;
4750         } else {
4751                 ad_data.drop_packet = false;
4752                 ad_data.forward_to_direct_queue = true;
4753                 ad_data.queue_id = rule->queue_id;
4754         }
4755
4756         ad_data.use_counter = false;
4757         ad_data.counter_id = 0;
4758
4759         ad_data.use_next_stage = false;
4760         ad_data.next_input_key = 0;
4761
4762         ad_data.write_rule_id_to_bd = true;
4763         ad_data.rule_id = rule->location;
4764
4765         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4766 }
4767
4768 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4769                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4770 {
4771         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4772         struct ethtool_usrip4_spec *usr_ip4_spec;
4773         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4774         struct ethtool_usrip6_spec *usr_ip6_spec;
4775         struct ethhdr *ether_spec;
4776
4777         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4778                 return -EINVAL;
4779
4780         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4781                 return -EOPNOTSUPP;
4782
4783         if ((fs->flow_type & FLOW_EXT) &&
4784             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4785                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4786                 return -EOPNOTSUPP;
4787         }
4788
4789         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4790         case SCTP_V4_FLOW:
4791         case TCP_V4_FLOW:
4792         case UDP_V4_FLOW:
4793                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4794                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4795
4796                 if (!tcp_ip4_spec->ip4src)
4797                         *unused |= BIT(INNER_SRC_IP);
4798
4799                 if (!tcp_ip4_spec->ip4dst)
4800                         *unused |= BIT(INNER_DST_IP);
4801
4802                 if (!tcp_ip4_spec->psrc)
4803                         *unused |= BIT(INNER_SRC_PORT);
4804
4805                 if (!tcp_ip4_spec->pdst)
4806                         *unused |= BIT(INNER_DST_PORT);
4807
4808                 if (!tcp_ip4_spec->tos)
4809                         *unused |= BIT(INNER_IP_TOS);
4810
4811                 break;
4812         case IP_USER_FLOW:
4813                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4814                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4815                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4816
4817                 if (!usr_ip4_spec->ip4src)
4818                         *unused |= BIT(INNER_SRC_IP);
4819
4820                 if (!usr_ip4_spec->ip4dst)
4821                         *unused |= BIT(INNER_DST_IP);
4822
4823                 if (!usr_ip4_spec->tos)
4824                         *unused |= BIT(INNER_IP_TOS);
4825
4826                 if (!usr_ip4_spec->proto)
4827                         *unused |= BIT(INNER_IP_PROTO);
4828
4829                 if (usr_ip4_spec->l4_4_bytes)
4830                         return -EOPNOTSUPP;
4831
4832                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4833                         return -EOPNOTSUPP;
4834
4835                 break;
4836         case SCTP_V6_FLOW:
4837         case TCP_V6_FLOW:
4838         case UDP_V6_FLOW:
4839                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4840                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4841                         BIT(INNER_IP_TOS);
4842
4843                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4844                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4845                         *unused |= BIT(INNER_SRC_IP);
4846
4847                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4848                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4849                         *unused |= BIT(INNER_DST_IP);
4850
4851                 if (!tcp_ip6_spec->psrc)
4852                         *unused |= BIT(INNER_SRC_PORT);
4853
4854                 if (!tcp_ip6_spec->pdst)
4855                         *unused |= BIT(INNER_DST_PORT);
4856
4857                 if (tcp_ip6_spec->tclass)
4858                         return -EOPNOTSUPP;
4859
4860                 break;
4861         case IPV6_USER_FLOW:
4862                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4863                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4864                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4865                         BIT(INNER_DST_PORT);
4866
4867                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4868                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4869                         *unused |= BIT(INNER_SRC_IP);
4870
4871                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4872                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4873                         *unused |= BIT(INNER_DST_IP);
4874
4875                 if (!usr_ip6_spec->l4_proto)
4876                         *unused |= BIT(INNER_IP_PROTO);
4877
4878                 if (usr_ip6_spec->tclass)
4879                         return -EOPNOTSUPP;
4880
4881                 if (usr_ip6_spec->l4_4_bytes)
4882                         return -EOPNOTSUPP;
4883
4884                 break;
4885         case ETHER_FLOW:
4886                 ether_spec = &fs->h_u.ether_spec;
4887                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4888                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4889                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4890
4891                 if (is_zero_ether_addr(ether_spec->h_source))
4892                         *unused |= BIT(INNER_SRC_MAC);
4893
4894                 if (is_zero_ether_addr(ether_spec->h_dest))
4895                         *unused |= BIT(INNER_DST_MAC);
4896
4897                 if (!ether_spec->h_proto)
4898                         *unused |= BIT(INNER_ETH_TYPE);
4899
4900                 break;
4901         default:
4902                 return -EOPNOTSUPP;
4903         }
4904
4905         if ((fs->flow_type & FLOW_EXT)) {
4906                 if (fs->h_ext.vlan_etype)
4907                         return -EOPNOTSUPP;
4908                 if (!fs->h_ext.vlan_tci)
4909                         *unused |= BIT(INNER_VLAN_TAG_FST);
4910
4911                 if (fs->m_ext.vlan_tci) {
4912                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4913                                 return -EINVAL;
4914                 }
4915         } else {
4916                 *unused |= BIT(INNER_VLAN_TAG_FST);
4917         }
4918
4919         if (fs->flow_type & FLOW_MAC_EXT) {
4920                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4921                         return -EOPNOTSUPP;
4922
4923                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4924                         *unused |= BIT(INNER_DST_MAC);
4925                 else
4926                         *unused &= ~(BIT(INNER_DST_MAC));
4927         }
4928
4929         return 0;
4930 }
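/* Example of a rule that passes the checks in hclge_fd_check_spec()
 * (illustrative only, assuming an interface named eth0 with flow director
 * enabled):
 *
 *	# steer TCP/IPv4 traffic to 192.168.1.100:80 into queue 5, entry 1
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 dst-port 80 \
 *		action 5 loc 1
 *
 * Fields left unspecified (src-ip, src-port and tos here) get their bits set
 * in *unused, so hclge_fd_convert_tuple() later emits don't-care key bytes
 * for them.
 */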
4931
4932 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4933 {
4934         struct hclge_fd_rule *rule = NULL;
4935         struct hlist_node *node2;
4936
4937         spin_lock_bh(&hdev->fd_rule_lock);
4938         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4939                 if (rule->location >= location)
4940                         break;
4941         }
4942
4943         spin_unlock_bh(&hdev->fd_rule_lock);
4944
4945         return rule && rule->location == location;
4946 }
4947
4948 /* the caller must hold fd_rule_lock when calling this function */
4949 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4950                                      struct hclge_fd_rule *new_rule,
4951                                      u16 location,
4952                                      bool is_add)
4953 {
4954         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4955         struct hlist_node *node2;
4956
4957         if (is_add && !new_rule)
4958                 return -EINVAL;
4959
4960         hlist_for_each_entry_safe(rule, node2,
4961                                   &hdev->fd_rule_list, rule_node) {
4962                 if (rule->location >= location)
4963                         break;
4964                 parent = rule;
4965         }
4966
4967         if (rule && rule->location == location) {
4968                 hlist_del(&rule->rule_node);
4969                 kfree(rule);
4970                 hdev->hclge_fd_rule_num--;
4971
4972                 if (!is_add) {
4973                         if (!hdev->hclge_fd_rule_num)
4974                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4975                         clear_bit(location, hdev->fd_bmap);
4976
4977                         return 0;
4978                 }
4979         } else if (!is_add) {
4980                 dev_err(&hdev->pdev->dev,
4981                         "delete fail, rule %d does not exist\n",
4982                         location);
4983                 return -EINVAL;
4984         }
4985
4986         INIT_HLIST_NODE(&new_rule->rule_node);
4987
4988         if (parent)
4989                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4990         else
4991                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4992
4993         set_bit(location, hdev->fd_bmap);
4994         hdev->hclge_fd_rule_num++;
4995         hdev->fd_active_type = new_rule->rule_type;
4996
4997         return 0;
4998 }
4999
5000 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5001                               struct ethtool_rx_flow_spec *fs,
5002                               struct hclge_fd_rule *rule)
5003 {
5004         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5005
5006         switch (flow_type) {
5007         case SCTP_V4_FLOW:
5008         case TCP_V4_FLOW:
5009         case UDP_V4_FLOW:
5010                 rule->tuples.src_ip[3] =
5011                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5012                 rule->tuples_mask.src_ip[3] =
5013                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5014
5015                 rule->tuples.dst_ip[3] =
5016                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5017                 rule->tuples_mask.dst_ip[3] =
5018                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5019
5020                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5021                 rule->tuples_mask.src_port =
5022                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5023
5024                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5025                 rule->tuples_mask.dst_port =
5026                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5027
5028                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5029                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5030
5031                 rule->tuples.ether_proto = ETH_P_IP;
5032                 rule->tuples_mask.ether_proto = 0xFFFF;
5033
5034                 break;
5035         case IP_USER_FLOW:
5036                 rule->tuples.src_ip[3] =
5037                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5038                 rule->tuples_mask.src_ip[3] =
5039                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5040
5041                 rule->tuples.dst_ip[3] =
5042                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5043                 rule->tuples_mask.dst_ip[3] =
5044                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5045
5046                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5047                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5048
5049                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5050                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5051
5052                 rule->tuples.ether_proto = ETH_P_IP;
5053                 rule->tuples_mask.ether_proto = 0xFFFF;
5054
5055                 break;
5056         case SCTP_V6_FLOW:
5057         case TCP_V6_FLOW:
5058         case UDP_V6_FLOW:
5059                 be32_to_cpu_array(rule->tuples.src_ip,
5060                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5061                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5062                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5063
5064                 be32_to_cpu_array(rule->tuples.dst_ip,
5065                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5066                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5067                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5068
5069                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5070                 rule->tuples_mask.src_port =
5071                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5072
5073                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5074                 rule->tuples_mask.dst_port =
5075                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5076
5077                 rule->tuples.ether_proto = ETH_P_IPV6;
5078                 rule->tuples_mask.ether_proto = 0xFFFF;
5079
5080                 break;
5081         case IPV6_USER_FLOW:
5082                 be32_to_cpu_array(rule->tuples.src_ip,
5083                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5084                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5085                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5086
5087                 be32_to_cpu_array(rule->tuples.dst_ip,
5088                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5089                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5090                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5091
5092                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5093                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5094
5095                 rule->tuples.ether_proto = ETH_P_IPV6;
5096                 rule->tuples_mask.ether_proto = 0xFFFF;
5097
5098                 break;
5099         case ETHER_FLOW:
5100                 ether_addr_copy(rule->tuples.src_mac,
5101                                 fs->h_u.ether_spec.h_source);
5102                 ether_addr_copy(rule->tuples_mask.src_mac,
5103                                 fs->m_u.ether_spec.h_source);
5104
5105                 ether_addr_copy(rule->tuples.dst_mac,
5106                                 fs->h_u.ether_spec.h_dest);
5107                 ether_addr_copy(rule->tuples_mask.dst_mac,
5108                                 fs->m_u.ether_spec.h_dest);
5109
5110                 rule->tuples.ether_proto =
5111                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5112                 rule->tuples_mask.ether_proto =
5113                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5114
5115                 break;
5116         default:
5117                 return -EOPNOTSUPP;
5118         }
5119
5120         switch (flow_type) {
5121         case SCTP_V4_FLOW:
5122         case SCTP_V6_FLOW:
5123                 rule->tuples.ip_proto = IPPROTO_SCTP;
5124                 rule->tuples_mask.ip_proto = 0xFF;
5125                 break;
5126         case TCP_V4_FLOW:
5127         case TCP_V6_FLOW:
5128                 rule->tuples.ip_proto = IPPROTO_TCP;
5129                 rule->tuples_mask.ip_proto = 0xFF;
5130                 break;
5131         case UDP_V4_FLOW:
5132         case UDP_V6_FLOW:
5133                 rule->tuples.ip_proto = IPPROTO_UDP;
5134                 rule->tuples_mask.ip_proto = 0xFF;
5135                 break;
5136         default:
5137                 break;
5138         }
5139
5140         if ((fs->flow_type & FLOW_EXT)) {
5141                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5142                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5143         }
5144
5145         if (fs->flow_type & FLOW_MAC_EXT) {
5146                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5147                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5148         }
5149
5150         return 0;
5151 }
5152
5153 /* the caller must hold fd_rule_lock when calling this function */
5154 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5155                                 struct hclge_fd_rule *rule)
5156 {
5157         int ret;
5158
5159         if (!rule) {
5160                 dev_err(&hdev->pdev->dev,
5161                         "The flow director rule is NULL\n");
5162                 return -EINVAL;
5163         }
5164
5165         /* it never fails here, so there is no need to check the return value */
5166         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5167
5168         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5169         if (ret)
5170                 goto clear_rule;
5171
5172         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5173         if (ret)
5174                 goto clear_rule;
5175
5176         return 0;
5177
5178 clear_rule:
5179         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5180         return ret;
5181 }
5182
5183 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5184                               struct ethtool_rxnfc *cmd)
5185 {
5186         struct hclge_vport *vport = hclge_get_vport(handle);
5187         struct hclge_dev *hdev = vport->back;
5188         u16 dst_vport_id = 0, q_index = 0;
5189         struct ethtool_rx_flow_spec *fs;
5190         struct hclge_fd_rule *rule;
5191         u32 unused = 0;
5192         u8 action;
5193         int ret;
5194
5195         if (!hnae3_dev_fd_supported(hdev))
5196                 return -EOPNOTSUPP;
5197
5198         if (!hdev->fd_en) {
5199                 dev_warn(&hdev->pdev->dev,
5200                          "Please enable flow director first\n");
5201                 return -EOPNOTSUPP;
5202         }
5203
5204         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5205
5206         ret = hclge_fd_check_spec(hdev, fs, &unused);
5207         if (ret) {
5208                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5209                 return ret;
5210         }
5211
5212         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5213                 action = HCLGE_FD_ACTION_DROP_PACKET;
5214         } else {
5215                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5216                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5217                 u16 tqps;
5218
5219                 if (vf > hdev->num_req_vfs) {
5220                         dev_err(&hdev->pdev->dev,
5221                                 "Error: vf id (%d) > max vf num (%d)\n",
5222                                 vf, hdev->num_req_vfs);
5223                         return -EINVAL;
5224                 }
5225
5226                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5227                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5228
5229                 if (ring >= tqps) {
5230                         dev_err(&hdev->pdev->dev,
5231                                 "Error: queue id (%d) > max tqp num (%d)\n",
5232                                 ring, tqps - 1);
5233                         return -EINVAL;
5234                 }
5235
5236                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5237                 q_index = ring;
5238         }
5239
5240         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5241         if (!rule)
5242                 return -ENOMEM;
5243
5244         ret = hclge_fd_get_tuple(hdev, fs, rule);
5245         if (ret) {
5246                 kfree(rule);
5247                 return ret;
5248         }
5249
5250         rule->flow_type = fs->flow_type;
5251
5252         rule->location = fs->location;
5253         rule->unused_tuple = unused;
5254         rule->vf_id = dst_vport_id;
5255         rule->queue_id = q_index;
5256         rule->action = action;
5257         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5258
5259         /* to avoid rule conflicts, all aRFS rules need to be cleared when
5260          * the user configures a rule via ethtool
5261          */
5262         hclge_clear_arfs_rules(handle);
5263
5264         spin_lock_bh(&hdev->fd_rule_lock);
5265         ret = hclge_fd_config_rule(hdev, rule);
5266
5267         spin_unlock_bh(&hdev->fd_rule_lock);
5268
5269         return ret;
5270 }
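/* fs->ring_cookie encodes both the destination function and the queue, which
 * is why hclge_add_fd_entry() splits it with ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf(). Rough layout, as used by the code above
 * (see ETHTOOL_RX_FLOW_SPEC_RING* in the ethtool UAPI):
 *
 *	bits  0..31  queue index on the selected function
 *	bits 32..39  destination function: 0 selects this PF, a non-zero
 *	             value n selects hdev->vport[n]
 *	RX_CLS_FLOW_DISC (all ones) means "drop the packet"
 *
 * hclge_get_fd_rule_info() rebuilds the cookie the same way using
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF (32).
 */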
5271
5272 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5273                               struct ethtool_rxnfc *cmd)
5274 {
5275         struct hclge_vport *vport = hclge_get_vport(handle);
5276         struct hclge_dev *hdev = vport->back;
5277         struct ethtool_rx_flow_spec *fs;
5278         int ret;
5279
5280         if (!hnae3_dev_fd_supported(hdev))
5281                 return -EOPNOTSUPP;
5282
5283         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5284
5285         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5286                 return -EINVAL;
5287
5288         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5289                 dev_err(&hdev->pdev->dev,
5290                         "Delete fail, rule %d does not exist\n",
5291                         fs->location);
5292                 return -ENOENT;
5293         }
5294
5295         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5296                                    fs->location, NULL, false);
5297         if (ret)
5298                 return ret;
5299
5300         spin_lock_bh(&hdev->fd_rule_lock);
5301         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5302
5303         spin_unlock_bh(&hdev->fd_rule_lock);
5304
5305         return ret;
5306 }
5307
5308 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5309                                      bool clear_list)
5310 {
5311         struct hclge_vport *vport = hclge_get_vport(handle);
5312         struct hclge_dev *hdev = vport->back;
5313         struct hclge_fd_rule *rule;
5314         struct hlist_node *node;
5315         u16 location;
5316
5317         if (!hnae3_dev_fd_supported(hdev))
5318                 return;
5319
5320         spin_lock_bh(&hdev->fd_rule_lock);
5321         for_each_set_bit(location, hdev->fd_bmap,
5322                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5323                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5324                                      NULL, false);
5325
5326         if (clear_list) {
5327                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5328                                           rule_node) {
5329                         hlist_del(&rule->rule_node);
5330                         kfree(rule);
5331                 }
5332                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5333                 hdev->hclge_fd_rule_num = 0;
5334                 bitmap_zero(hdev->fd_bmap,
5335                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5336         }
5337
5338         spin_unlock_bh(&hdev->fd_rule_lock);
5339 }
5340
5341 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5342 {
5343         struct hclge_vport *vport = hclge_get_vport(handle);
5344         struct hclge_dev *hdev = vport->back;
5345         struct hclge_fd_rule *rule;
5346         struct hlist_node *node;
5347         int ret;
5348
5349         /* Return 0 here, because the reset error handling checks this
5350          * return value; returning an error here would make the whole
5351          * reset process fail.
5352          */
5353         if (!hnae3_dev_fd_supported(hdev))
5354                 return 0;
5355
5356         /* if fd is disabled, the rules should not be restored during reset */
5357         if (!hdev->fd_en)
5358                 return 0;
5359
5360         spin_lock_bh(&hdev->fd_rule_lock);
5361         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5362                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5363                 if (!ret)
5364                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5365
5366                 if (ret) {
5367                         dev_warn(&hdev->pdev->dev,
5368                                  "Restore rule %d failed, remove it\n",
5369                                  rule->location);
5370                         clear_bit(rule->location, hdev->fd_bmap);
5371                         hlist_del(&rule->rule_node);
5372                         kfree(rule);
5373                         hdev->hclge_fd_rule_num--;
5374                 }
5375         }
5376
5377         if (hdev->hclge_fd_rule_num)
5378                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5379
5380         spin_unlock_bh(&hdev->fd_rule_lock);
5381
5382         return 0;
5383 }
5384
5385 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5386                                  struct ethtool_rxnfc *cmd)
5387 {
5388         struct hclge_vport *vport = hclge_get_vport(handle);
5389         struct hclge_dev *hdev = vport->back;
5390
5391         if (!hnae3_dev_fd_supported(hdev))
5392                 return -EOPNOTSUPP;
5393
5394         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5395         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5396
5397         return 0;
5398 }
5399
5400 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5401                                   struct ethtool_rxnfc *cmd)
5402 {
5403         struct hclge_vport *vport = hclge_get_vport(handle);
5404         struct hclge_fd_rule *rule = NULL;
5405         struct hclge_dev *hdev = vport->back;
5406         struct ethtool_rx_flow_spec *fs;
5407         struct hlist_node *node2;
5408
5409         if (!hnae3_dev_fd_supported(hdev))
5410                 return -EOPNOTSUPP;
5411
5412         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5413
5414         spin_lock_bh(&hdev->fd_rule_lock);
5415
5416         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5417                 if (rule->location >= fs->location)
5418                         break;
5419         }
5420
5421         if (!rule || fs->location != rule->location) {
5422                 spin_unlock_bh(&hdev->fd_rule_lock);
5423
5424                 return -ENOENT;
5425         }
5426
5427         fs->flow_type = rule->flow_type;
5428         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5429         case SCTP_V4_FLOW:
5430         case TCP_V4_FLOW:
5431         case UDP_V4_FLOW:
5432                 fs->h_u.tcp_ip4_spec.ip4src =
5433                                 cpu_to_be32(rule->tuples.src_ip[3]);
5434                 fs->m_u.tcp_ip4_spec.ip4src =
5435                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5436                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5437
5438                 fs->h_u.tcp_ip4_spec.ip4dst =
5439                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5440                 fs->m_u.tcp_ip4_spec.ip4dst =
5441                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5442                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5443
5444                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5445                 fs->m_u.tcp_ip4_spec.psrc =
5446                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5447                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5448
5449                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5450                 fs->m_u.tcp_ip4_spec.pdst =
5451                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5452                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5453
5454                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5455                 fs->m_u.tcp_ip4_spec.tos =
5456                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5457                                 0 : rule->tuples_mask.ip_tos;
5458
5459                 break;
5460         case IP_USER_FLOW:
5461                 fs->h_u.usr_ip4_spec.ip4src =
5462                                 cpu_to_be32(rule->tuples.src_ip[3]);
5463                 fs->m_u.tcp_ip4_spec.ip4src =
5464                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5465                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5466
5467                 fs->h_u.usr_ip4_spec.ip4dst =
5468                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5469                 fs->m_u.usr_ip4_spec.ip4dst =
5470                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5471                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5472
5473                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5474                 fs->m_u.usr_ip4_spec.tos =
5475                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5476                                 0 : rule->tuples_mask.ip_tos;
5477
5478                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5479                 fs->m_u.usr_ip4_spec.proto =
5480                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5481                                 0 : rule->tuples_mask.ip_proto;
5482
5483                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5484
5485                 break;
5486         case SCTP_V6_FLOW:
5487         case TCP_V6_FLOW:
5488         case UDP_V6_FLOW:
5489                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5490                                   rule->tuples.src_ip, 4);
5491                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5492                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5493                 else
5494                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5495                                           rule->tuples_mask.src_ip, 4);
5496
5497                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5498                                   rule->tuples.dst_ip, 4);
5499                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5500                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5501                 else
5502                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5503                                           rule->tuples_mask.dst_ip, 4);
5504
5505                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5506                 fs->m_u.tcp_ip6_spec.psrc =
5507                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5508                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5509
5510                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5511                 fs->m_u.tcp_ip6_spec.pdst =
5512                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5513                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5514
5515                 break;
5516         case IPV6_USER_FLOW:
5517                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5518                                   rule->tuples.src_ip, 4);
5519                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5520                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5521                 else
5522                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5523                                           rule->tuples_mask.src_ip, 4);
5524
5525                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5526                                   rule->tuples.dst_ip, 4);
5527                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5528                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5529                 else
5530                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5531                                           rule->tuples_mask.dst_ip, 4);
5532
5533                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5534                 fs->m_u.usr_ip6_spec.l4_proto =
5535                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5536                                 0 : rule->tuples_mask.ip_proto;
5537
5538                 break;
5539         case ETHER_FLOW:
5540                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5541                                 rule->tuples.src_mac);
5542                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5543                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5544                 else
5545                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5546                                         rule->tuples_mask.src_mac);
5547
5548                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5549                                 rule->tuples.dst_mac);
5550                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5551                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5552                 else
5553                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5554                                         rule->tuples_mask.dst_mac);
5555
5556                 fs->h_u.ether_spec.h_proto =
5557                                 cpu_to_be16(rule->tuples.ether_proto);
5558                 fs->m_u.ether_spec.h_proto =
5559                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5560                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5561
5562                 break;
5563         default:
5564                 spin_unlock_bh(&hdev->fd_rule_lock);
5565                 return -EOPNOTSUPP;
5566         }
5567
5568         if (fs->flow_type & FLOW_EXT) {
5569                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5570                 fs->m_ext.vlan_tci =
5571                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5572                                 cpu_to_be16(VLAN_VID_MASK) :
5573                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5574         }
5575
5576         if (fs->flow_type & FLOW_MAC_EXT) {
5577                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5578                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5579                         eth_zero_addr(fs->m_ext.h_dest);
5580                 else
5581                         ether_addr_copy(fs->m_ext.h_dest,
5582                                         rule->tuples_mask.dst_mac);
5583         }
5584
5585         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5586                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5587         } else {
5588                 u64 vf_id;
5589
5590                 fs->ring_cookie = rule->queue_id;
5591                 vf_id = rule->vf_id;
5592                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5593                 fs->ring_cookie |= vf_id;
5594         }
5595
5596         spin_unlock_bh(&hdev->fd_rule_lock);
5597
5598         return 0;
5599 }
5600
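/* ethtool -n: report the locations of all configured flow director rules */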
5601 static int hclge_get_all_rules(struct hnae3_handle *handle,
5602                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5603 {
5604         struct hclge_vport *vport = hclge_get_vport(handle);
5605         struct hclge_dev *hdev = vport->back;
5606         struct hclge_fd_rule *rule;
5607         struct hlist_node *node2;
5608         int cnt = 0;
5609
5610         if (!hnae3_dev_fd_supported(hdev))
5611                 return -EOPNOTSUPP;
5612
5613         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5614
5615         spin_lock_bh(&hdev->fd_rule_lock);
5616         hlist_for_each_entry_safe(rule, node2,
5617                                   &hdev->fd_rule_list, rule_node) {
5618                 if (cnt == cmd->rule_cnt) {
5619                         spin_unlock_bh(&hdev->fd_rule_lock);
5620                         return -EMSGSIZE;
5621                 }
5622
5623                 rule_locs[cnt] = rule->location;
5624                 cnt++;
5625         }
5626
5627         spin_unlock_bh(&hdev->fd_rule_lock);
5628
5629         cmd->rule_cnt = cnt;
5630
5631         return 0;
5632 }
5633
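/* extract the flow director tuples (ether type, IP protocol, destination
 * port and IP addresses) from the dissected flow keys of an aRFS flow
 */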
5634 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5635                                      struct hclge_fd_rule_tuples *tuples)
5636 {
5637         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5638         tuples->ip_proto = fkeys->basic.ip_proto;
5639         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5640
5641         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5642                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5643                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5644         } else {
5645                 memcpy(tuples->src_ip,
5646                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5647                        sizeof(tuples->src_ip));
5648                 memcpy(tuples->dst_ip,
5649                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5650                        sizeof(tuples->dst_ip));
5651         }
5652 }
5653
5654 /* traverse all rules, check whether an existing rule has the same tuples */
5655 static struct hclge_fd_rule *
5656 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5657                           const struct hclge_fd_rule_tuples *tuples)
5658 {
5659         struct hclge_fd_rule *rule = NULL;
5660         struct hlist_node *node;
5661
5662         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5663                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5664                         return rule;
5665         }
5666
5667         return NULL;
5668 }
5669
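/* build an aRFS flow director rule from the extracted tuples: mask out the
 * fields aRFS does not match on and derive the flow type from the protocols
 */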
5670 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5671                                      struct hclge_fd_rule *rule)
5672 {
5673         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5674                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5675                              BIT(INNER_SRC_PORT);
5676         rule->action = 0;
5677         rule->vf_id = 0;
5678         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5679         if (tuples->ether_proto == ETH_P_IP) {
5680                 if (tuples->ip_proto == IPPROTO_TCP)
5681                         rule->flow_type = TCP_V4_FLOW;
5682                 else
5683                         rule->flow_type = UDP_V4_FLOW;
5684         } else {
5685                 if (tuples->ip_proto == IPPROTO_TCP)
5686                         rule->flow_type = TCP_V6_FLOW;
5687                 else
5688                         rule->flow_type = UDP_V6_FLOW;
5689         }
5690         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5691         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5692 }
5693
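/* aRFS callback: steer a flow to the requested queue by adding a new flow
 * director rule, or by updating the queue of an existing rule for this flow
 */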
5694 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5695                                       u16 flow_id, struct flow_keys *fkeys)
5696 {
5697         struct hclge_vport *vport = hclge_get_vport(handle);
5698         struct hclge_fd_rule_tuples new_tuples;
5699         struct hclge_dev *hdev = vport->back;
5700         struct hclge_fd_rule *rule;
5701         u16 tmp_queue_id;
5702         u16 bit_id;
5703         int ret;
5704
5705         if (!hnae3_dev_fd_supported(hdev))
5706                 return -EOPNOTSUPP;
5707
5708         memset(&new_tuples, 0, sizeof(new_tuples));
5709         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5710
5711         spin_lock_bh(&hdev->fd_rule_lock);
5712
5713         /* when there is already an fd rule added by the user,
5714          * arfs should not work
5715          */
5716         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5717                 spin_unlock_bh(&hdev->fd_rule_lock);
5718
5719                 return -EOPNOTSUPP;
5720         }
5721
5722         /* check whether a flow director filter already exists for this flow:
5723          * if not, create a new filter for it;
5724          * if a filter exists with a different queue id, modify the filter;
5725          * if a filter exists with the same queue id, do nothing
5726          */
5727         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5728         if (!rule) {
5729                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5730                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5731                         spin_unlock_bh(&hdev->fd_rule_lock);
5732
5733                         return -ENOSPC;
5734                 }
5735
5736                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5737                 if (!rule) {
5738                         spin_unlock_bh(&hdev->fd_rule_lock);
5739
5740                         return -ENOMEM;
5741                 }
5742
5743                 set_bit(bit_id, hdev->fd_bmap);
5744                 rule->location = bit_id;
5745                 rule->flow_id = flow_id;
5746                 rule->queue_id = queue_id;
5747                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5748                 ret = hclge_fd_config_rule(hdev, rule);
5749
5750                 spin_unlock_bh(&hdev->fd_rule_lock);
5751
5752                 if (ret)
5753                         return ret;
5754
5755                 return rule->location;
5756         }
5757
5758         spin_unlock_bh(&hdev->fd_rule_lock);
5759
5760         if (rule->queue_id == queue_id)
5761                 return rule->location;
5762
5763         tmp_queue_id = rule->queue_id;
5764         rule->queue_id = queue_id;
5765         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5766         if (ret) {
5767                 rule->queue_id = tmp_queue_id;
5768                 return ret;
5769         }
5770
5771         return rule->location;
5772 }
5773
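/* drop aRFS rules whose flows rps_may_expire_flow() reports as expired and
 * remove their entries from the flow director TCAM
 */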
5774 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5775 {
5776 #ifdef CONFIG_RFS_ACCEL
5777         struct hnae3_handle *handle = &hdev->vport[0].nic;
5778         struct hclge_fd_rule *rule;
5779         struct hlist_node *node;
5780         HLIST_HEAD(del_list);
5781
5782         spin_lock_bh(&hdev->fd_rule_lock);
5783         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5784                 spin_unlock_bh(&hdev->fd_rule_lock);
5785                 return;
5786         }
5787         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5788                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5789                                         rule->flow_id, rule->location)) {
5790                         hlist_del_init(&rule->rule_node);
5791                         hlist_add_head(&rule->rule_node, &del_list);
5792                         hdev->hclge_fd_rule_num--;
5793                         clear_bit(rule->location, hdev->fd_bmap);
5794                 }
5795         }
5796         spin_unlock_bh(&hdev->fd_rule_lock);
5797
5798         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5799                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5800                                      rule->location, NULL, false);
5801                 kfree(rule);
5802         }
5803 #endif
5804 }
5805
5806 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5807 {
5808 #ifdef CONFIG_RFS_ACCEL
5809         struct hclge_vport *vport = hclge_get_vport(handle);
5810         struct hclge_dev *hdev = vport->back;
5811
5812         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5813                 hclge_del_all_fd_entries(handle, true);
5814 #endif
5815 }
5816
5817 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5818 {
5819         struct hclge_vport *vport = hclge_get_vport(handle);
5820         struct hclge_dev *hdev = vport->back;
5821
5822         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5823                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5824 }
5825
5826 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5827 {
5828         struct hclge_vport *vport = hclge_get_vport(handle);
5829         struct hclge_dev *hdev = vport->back;
5830
5831         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5832 }
5833
5834 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5835 {
5836         struct hclge_vport *vport = hclge_get_vport(handle);
5837         struct hclge_dev *hdev = vport->back;
5838
5839         return hdev->rst_stats.hw_reset_done_cnt;
5840 }
5841
5842 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5843 {
5844         struct hclge_vport *vport = hclge_get_vport(handle);
5845         struct hclge_dev *hdev = vport->back;
5846         bool clear;
5847
5848         hdev->fd_en = enable;
5849         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5850         if (!enable)
5851                 hclge_del_all_fd_entries(handle, clear);
5852         else
5853                 hclge_restore_fd_entries(handle);
5854 }
5855
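/* enable or disable MAC TX/RX together with padding, FCS and oversize
 * truncation handling via the CONFIG_MAC_MODE command
 */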
5856 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5857 {
5858         struct hclge_desc desc;
5859         struct hclge_config_mac_mode_cmd *req =
5860                 (struct hclge_config_mac_mode_cmd *)desc.data;
5861         u32 loop_en = 0;
5862         int ret;
5863
5864         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5868         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5869         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5870         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5871         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5872         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5873         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5874         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5875         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5876         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5877         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5878         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5879         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5880
5881         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5882         if (ret)
5883                 dev_err(&hdev->pdev->dev,
5884                         "mac enable fail, ret =%d.\n", ret);
5885 }
5886
5887 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5888 {
5889         struct hclge_config_mac_mode_cmd *req;
5890         struct hclge_desc desc;
5891         u32 loop_en;
5892         int ret;
5893
5894         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5895         /* 1 Read out the MAC mode config first */
5896         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5897         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5898         if (ret) {
5899                 dev_err(&hdev->pdev->dev,
5900                         "mac loopback get fail, ret =%d.\n", ret);
5901                 return ret;
5902         }
5903
5904         /* 2 Then setup the loopback flag */
5905         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5906         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5907         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5908         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5909
5910         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5911
5912         /* 3 Config mac work mode with loopback flag
5913          * and its original configuration parameters
5914          */
5915         hclge_cmd_reuse_desc(&desc, false);
5916         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5917         if (ret)
5918                 dev_err(&hdev->pdev->dev,
5919                         "mac loopback set fail, ret =%d.\n", ret);
5920         return ret;
5921 }
5922
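/* configure serdes serial/parallel inner loopback, poll until the firmware
 * reports completion, then wait for the MAC link to reach the expected state
 */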
5923 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5924                                      enum hnae3_loop loop_mode)
5925 {
5926 #define HCLGE_SERDES_RETRY_MS   10
5927 #define HCLGE_SERDES_RETRY_NUM  100
5928
5929 #define HCLGE_MAC_LINK_STATUS_MS   10
5930 #define HCLGE_MAC_LINK_STATUS_NUM  100
5931 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5932 #define HCLGE_MAC_LINK_STATUS_UP   1
5933
5934         struct hclge_serdes_lb_cmd *req;
5935         struct hclge_desc desc;
5936         int mac_link_ret = 0;
5937         int ret, i = 0;
5938         u8 loop_mode_b;
5939
5940         req = (struct hclge_serdes_lb_cmd *)desc.data;
5941         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5942
5943         switch (loop_mode) {
5944         case HNAE3_LOOP_SERIAL_SERDES:
5945                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5946                 break;
5947         case HNAE3_LOOP_PARALLEL_SERDES:
5948                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5949                 break;
5950         default:
5951                 dev_err(&hdev->pdev->dev,
5952                         "unsupported serdes loopback mode %d\n", loop_mode);
5953                 return -EOPNOTSUPP;
5954         }
5955
5956         if (en) {
5957                 req->enable = loop_mode_b;
5958                 req->mask = loop_mode_b;
5959                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5960         } else {
5961                 req->mask = loop_mode_b;
5962                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5963         }
5964
5965         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5966         if (ret) {
5967                 dev_err(&hdev->pdev->dev,
5968                         "serdes loopback set fail, ret = %d\n", ret);
5969                 return ret;
5970         }
5971
5972         do {
5973                 msleep(HCLGE_SERDES_RETRY_MS);
5974                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5975                                            true);
5976                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5977                 if (ret) {
5978                         dev_err(&hdev->pdev->dev,
5979                                 "serdes loopback get fail, ret = %d\n", ret);
5980                         return ret;
5981                 }
5982         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5983                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5984
5985         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5986                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5987                 return -EBUSY;
5988         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5989                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5990                 return -EIO;
5991         }
5992
5993         hclge_cfg_mac_mode(hdev, en);
5994
5995         i = 0;
5996         do {
5997                 /* serdes internal loopback, independent of the network cable. */
5998                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5999                 ret = hclge_get_mac_link_status(hdev);
6000                 if (ret == mac_link_ret)
6001                         return 0;
6002         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6003
6004         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6005
6006         return -EBUSY;
6007 }
6008
6009 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6010                             int stream_id, bool enable)
6011 {
6012         struct hclge_desc desc;
6013         struct hclge_cfg_com_tqp_queue_cmd *req =
6014                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6015         int ret;
6016
6017         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6018         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6019         req->stream_id = cpu_to_le16(stream_id);
6020         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6021
6022         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6023         if (ret)
6024                 dev_err(&hdev->pdev->dev,
6025                         "Tqp enable fail, status =%d.\n", ret);
6026         return ret;
6027 }
6028
6029 static int hclge_set_loopback(struct hnae3_handle *handle,
6030                               enum hnae3_loop loop_mode, bool en)
6031 {
6032         struct hclge_vport *vport = hclge_get_vport(handle);
6033         struct hnae3_knic_private_info *kinfo;
6034         struct hclge_dev *hdev = vport->back;
6035         int i, ret;
6036
6037         switch (loop_mode) {
6038         case HNAE3_LOOP_APP:
6039                 ret = hclge_set_app_loopback(hdev, en);
6040                 break;
6041         case HNAE3_LOOP_SERIAL_SERDES:
6042         case HNAE3_LOOP_PARALLEL_SERDES:
6043                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6044                 break;
6045         default:
6046                 ret = -EOPNOTSUPP;
6047                 dev_err(&hdev->pdev->dev,
6048                         "loop_mode %d is not supported\n", loop_mode);
6049                 break;
6050         }
6051
6052         if (ret)
6053                 return ret;
6054
6055         kinfo = &vport->nic.kinfo;
6056         for (i = 0; i < kinfo->num_tqps; i++) {
6057                 ret = hclge_tqp_enable(hdev, i, 0, en);
6058                 if (ret)
6059                         return ret;
6060         }
6061
6062         return 0;
6063 }
6064
6065 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6066 {
6067         struct hclge_vport *vport = hclge_get_vport(handle);
6068         struct hnae3_knic_private_info *kinfo;
6069         struct hnae3_queue *queue;
6070         struct hclge_tqp *tqp;
6071         int i;
6072
6073         kinfo = &vport->nic.kinfo;
6074         for (i = 0; i < kinfo->num_tqps; i++) {
6075                 queue = handle->kinfo.tqp[i];
6076                 tqp = container_of(queue, struct hclge_tqp, q);
6077                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6078         }
6079 }
6080
6081 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6082 {
6083         struct hclge_vport *vport = hclge_get_vport(handle);
6084         struct hclge_dev *hdev = vport->back;
6085
6086         if (enable) {
6087                 mod_timer(&hdev->service_timer, jiffies + HZ);
6088         } else {
6089                 del_timer_sync(&hdev->service_timer);
6090                 cancel_work_sync(&hdev->service_task);
6091                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6092         }
6093 }
6094
6095 static int hclge_ae_start(struct hnae3_handle *handle)
6096 {
6097         struct hclge_vport *vport = hclge_get_vport(handle);
6098         struct hclge_dev *hdev = vport->back;
6099
6100         /* mac enable */
6101         hclge_cfg_mac_mode(hdev, true);
6102         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6103         hdev->hw.mac.link = 0;
6104
6105         /* reset tqp stats */
6106         hclge_reset_tqp_stats(handle);
6107
6108         hclge_mac_start_phy(hdev);
6109
6110         return 0;
6111 }
6112
6113 static void hclge_ae_stop(struct hnae3_handle *handle)
6114 {
6115         struct hclge_vport *vport = hclge_get_vport(handle);
6116         struct hclge_dev *hdev = vport->back;
6117         int i;
6118
6119         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6120
6121         hclge_clear_arfs_rules(handle);
6122
6123         /* If it is not PF reset, the firmware will disable the MAC,
6124          * so it only needs to stop the phy here.
6125          */
6126         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6127             hdev->reset_type != HNAE3_FUNC_RESET) {
6128                 hclge_mac_stop_phy(hdev);
6129                 return;
6130         }
6131
6132         for (i = 0; i < handle->kinfo.num_tqps; i++)
6133                 hclge_reset_tqp(handle, i);
6134
6135         /* Mac disable */
6136         hclge_cfg_mac_mode(hdev, false);
6137
6138         hclge_mac_stop_phy(hdev);
6139
6140         /* reset tqp stats */
6141         hclge_reset_tqp_stats(handle);
6142         hclge_update_link_status(hdev);
6143 }
6144
6145 int hclge_vport_start(struct hclge_vport *vport)
6146 {
6147         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6148         vport->last_active_jiffies = jiffies;
6149         return 0;
6150 }
6151
6152 void hclge_vport_stop(struct hclge_vport *vport)
6153 {
6154         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6155 }
6156
6157 static int hclge_client_start(struct hnae3_handle *handle)
6158 {
6159         struct hclge_vport *vport = hclge_get_vport(handle);
6160
6161         return hclge_vport_start(vport);
6162 }
6163
6164 static void hclge_client_stop(struct hnae3_handle *handle)
6165 {
6166         struct hclge_vport *vport = hclge_get_vport(handle);
6167
6168         hclge_vport_stop(vport);
6169 }
6170
6171 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6172                                          u16 cmdq_resp, u8  resp_code,
6173                                          enum hclge_mac_vlan_tbl_opcode op)
6174 {
6175         struct hclge_dev *hdev = vport->back;
6176         int return_status = -EIO;
6177
6178         if (cmdq_resp) {
6179                 dev_err(&hdev->pdev->dev,
6180                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6181                         cmdq_resp);
6182                 return -EIO;
6183         }
6184
6185         if (op == HCLGE_MAC_VLAN_ADD) {
6186                 if ((!resp_code) || (resp_code == 1)) {
6187                         return_status = 0;
6188                 } else if (resp_code == 2) {
6189                         return_status = -ENOSPC;
6190                         dev_err(&hdev->pdev->dev,
6191                                 "add mac addr failed for uc_overflow.\n");
6192                 } else if (resp_code == 3) {
6193                         return_status = -ENOSPC;
6194                         dev_err(&hdev->pdev->dev,
6195                                 "add mac addr failed for mc_overflow.\n");
6196                 } else {
6197                         dev_err(&hdev->pdev->dev,
6198                                 "add mac addr failed for undefined, code=%d.\n",
6199                                 resp_code);
6200                 }
6201         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6202                 if (!resp_code) {
6203                         return_status = 0;
6204                 } else if (resp_code == 1) {
6205                         return_status = -ENOENT;
6206                         dev_dbg(&hdev->pdev->dev,
6207                                 "remove mac addr failed for miss.\n");
6208                 } else {
6209                         dev_err(&hdev->pdev->dev,
6210                                 "remove mac addr failed for undefined, code=%d.\n",
6211                                 resp_code);
6212                 }
6213         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6214                 if (!resp_code) {
6215                         return_status = 0;
6216                 } else if (resp_code == 1) {
6217                         return_status = -ENOENT;
6218                         dev_dbg(&hdev->pdev->dev,
6219                                 "lookup mac addr failed for miss.\n");
6220                 } else {
6221                         dev_err(&hdev->pdev->dev,
6222                                 "lookup mac addr failed for undefined, code=%d.\n",
6223                                 resp_code);
6224                 }
6225         } else {
6226                 return_status = -EINVAL;
6227                 dev_err(&hdev->pdev->dev,
6228                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6229                         op);
6230         }
6231
6232         return return_status;
6233 }
6234
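/* set or clear the bit for a function id in the VF bitmap carried by the
 * MAC_VLAN table descriptors (vfid 0-191 in desc[1], 192-255 in desc[2])
 */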
6235 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6236 {
6237         int word_num;
6238         int bit_num;
6239
6240         if (vfid > 255 || vfid < 0)
6241                 return -EIO;
6242
6243         if (vfid <= 191) {
6244                 word_num = vfid / 32;
6245                 bit_num  = vfid % 32;
6246                 if (clr)
6247                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6248                 else
6249                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6250         } else {
6251                 word_num = (vfid - 192) / 32;
6252                 bit_num  = vfid % 32;
6253                 if (clr)
6254                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6255                 else
6256                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6257         }
6258
6259         return 0;
6260 }
6261
6262 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6263 {
6264 #define HCLGE_DESC_NUMBER 3
6265 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6266         int i, j;
6267
6268         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6269                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6270                         if (desc[i].data[j])
6271                                 return false;
6272
6273         return true;
6274 }
6275
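/* pack a MAC address into the high/low words of a MAC_VLAN table entry and
 * mark the entry valid (and as multicast when requested)
 */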
6276 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6277                                    const u8 *addr, bool is_mc)
6278 {
6279         const unsigned char *mac_addr = addr;
6280         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6281                        (mac_addr[0]) | (mac_addr[1] << 8);
6282         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6283
6284         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6285         if (is_mc) {
6286                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6287                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6288         }
6289
6290         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6291         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6292 }
6293
6294 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6295                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6296 {
6297         struct hclge_dev *hdev = vport->back;
6298         struct hclge_desc desc;
6299         u8 resp_code;
6300         u16 retval;
6301         int ret;
6302
6303         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6304
6305         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6306
6307         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6308         if (ret) {
6309                 dev_err(&hdev->pdev->dev,
6310                         "del mac addr failed for cmd_send, ret =%d.\n",
6311                         ret);
6312                 return ret;
6313         }
6314         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6315         retval = le16_to_cpu(desc.retval);
6316
6317         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6318                                              HCLGE_MAC_VLAN_REMOVE);
6319 }
6320
6321 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6322                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6323                                      struct hclge_desc *desc,
6324                                      bool is_mc)
6325 {
6326         struct hclge_dev *hdev = vport->back;
6327         u8 resp_code;
6328         u16 retval;
6329         int ret;
6330
6331         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6332         if (is_mc) {
6333                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6334                 memcpy(desc[0].data,
6335                        req,
6336                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6337                 hclge_cmd_setup_basic_desc(&desc[1],
6338                                            HCLGE_OPC_MAC_VLAN_ADD,
6339                                            true);
6340                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6341                 hclge_cmd_setup_basic_desc(&desc[2],
6342                                            HCLGE_OPC_MAC_VLAN_ADD,
6343                                            true);
6344                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6345         } else {
6346                 memcpy(desc[0].data,
6347                        req,
6348                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6349                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6350         }
6351         if (ret) {
6352                 dev_err(&hdev->pdev->dev,
6353                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6354                         ret);
6355                 return ret;
6356         }
6357         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6358         retval = le16_to_cpu(desc[0].retval);
6359
6360         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6361                                              HCLGE_MAC_VLAN_LKUP);
6362 }
6363
6364 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6365                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6366                                   struct hclge_desc *mc_desc)
6367 {
6368         struct hclge_dev *hdev = vport->back;
6369         int cfg_status;
6370         u8 resp_code;
6371         u16 retval;
6372         int ret;
6373
6374         if (!mc_desc) {
6375                 struct hclge_desc desc;
6376
6377                 hclge_cmd_setup_basic_desc(&desc,
6378                                            HCLGE_OPC_MAC_VLAN_ADD,
6379                                            false);
6380                 memcpy(desc.data, req,
6381                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6382                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6383                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6384                 retval = le16_to_cpu(desc.retval);
6385
6386                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6387                                                            resp_code,
6388                                                            HCLGE_MAC_VLAN_ADD);
6389         } else {
6390                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6391                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6392                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6393                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6394                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6395                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6396                 memcpy(mc_desc[0].data, req,
6397                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6398                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6399                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6400                 retval = le16_to_cpu(mc_desc[0].retval);
6401
6402                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6403                                                            resp_code,
6404                                                            HCLGE_MAC_VLAN_ADD);
6405         }
6406
6407         if (ret) {
6408                 dev_err(&hdev->pdev->dev,
6409                         "add mac addr failed for cmd_send, ret =%d.\n",
6410                         ret);
6411                 return ret;
6412         }
6413
6414         return cfg_status;
6415 }
6416
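/* request unicast MAC VLAN (UMV) table space from the firmware and split it
 * into a private quota per function plus a shared pool
 */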
6417 static int hclge_init_umv_space(struct hclge_dev *hdev)
6418 {
6419         u16 allocated_size = 0;
6420         int ret;
6421
6422         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6423                                   true);
6424         if (ret)
6425                 return ret;
6426
6427         if (allocated_size < hdev->wanted_umv_size)
6428                 dev_warn(&hdev->pdev->dev,
6429                          "Alloc umv space failed, want %d, get %d\n",
6430                          hdev->wanted_umv_size, allocated_size);
6431
6432         mutex_init(&hdev->umv_mutex);
6433         hdev->max_umv_size = allocated_size;
6434         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6435         hdev->share_umv_size = hdev->priv_umv_size +
6436                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6437
6438         return 0;
6439 }
6440
6441 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6442 {
6443         int ret;
6444
6445         if (hdev->max_umv_size > 0) {
6446                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6447                                           false);
6448                 if (ret)
6449                         return ret;
6450                 hdev->max_umv_size = 0;
6451         }
6452         mutex_destroy(&hdev->umv_mutex);
6453
6454         return 0;
6455 }
6456
6457 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6458                                u16 *allocated_size, bool is_alloc)
6459 {
6460         struct hclge_umv_spc_alc_cmd *req;
6461         struct hclge_desc desc;
6462         int ret;
6463
6464         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6465         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6466         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6467         req->space_size = cpu_to_le32(space_size);
6468
6469         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6470         if (ret) {
6471                 dev_err(&hdev->pdev->dev,
6472                         "%s umv space failed for cmd_send, ret =%d\n",
6473                         is_alloc ? "allocate" : "free", ret);
6474                 return ret;
6475         }
6476
6477         if (is_alloc && allocated_size)
6478                 *allocated_size = le32_to_cpu(desc.data[1]);
6479
6480         return 0;
6481 }
6482
6483 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6484 {
6485         struct hclge_vport *vport;
6486         int i;
6487
6488         for (i = 0; i < hdev->num_alloc_vport; i++) {
6489                 vport = &hdev->vport[i];
6490                 vport->used_umv_num = 0;
6491         }
6492
6493         mutex_lock(&hdev->umv_mutex);
6494         hdev->share_umv_size = hdev->priv_umv_size +
6495                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6496         mutex_unlock(&hdev->umv_mutex);
6497 }
6498
6499 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6500 {
6501         struct hclge_dev *hdev = vport->back;
6502         bool is_full;
6503
6504         mutex_lock(&hdev->umv_mutex);
6505         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6506                    hdev->share_umv_size == 0);
6507         mutex_unlock(&hdev->umv_mutex);
6508
6509         return is_full;
6510 }
6511
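/* update the per-vport and shared UMV usage counters when a unicast MAC
 * entry is added (is_free == false) or removed (is_free == true)
 */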
6512 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6513 {
6514         struct hclge_dev *hdev = vport->back;
6515
6516         mutex_lock(&hdev->umv_mutex);
6517         if (is_free) {
6518                 if (vport->used_umv_num > hdev->priv_umv_size)
6519                         hdev->share_umv_size++;
6520
6521                 if (vport->used_umv_num > 0)
6522                         vport->used_umv_num--;
6523         } else {
6524                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6525                     hdev->share_umv_size > 0)
6526                         hdev->share_umv_size--;
6527                 vport->used_umv_num++;
6528         }
6529         mutex_unlock(&hdev->umv_mutex);
6530 }
6531
6532 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6533                              const unsigned char *addr)
6534 {
6535         struct hclge_vport *vport = hclge_get_vport(handle);
6536
6537         return hclge_add_uc_addr_common(vport, addr);
6538 }
6539
6540 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6541                              const unsigned char *addr)
6542 {
6543         struct hclge_dev *hdev = vport->back;
6544         struct hclge_mac_vlan_tbl_entry_cmd req;
6545         struct hclge_desc desc;
6546         u16 egress_port = 0;
6547         int ret;
6548
6549         /* mac addr check */
6550         if (is_zero_ether_addr(addr) ||
6551             is_broadcast_ether_addr(addr) ||
6552             is_multicast_ether_addr(addr)) {
6553                 dev_err(&hdev->pdev->dev,
6554                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6555                          addr,
6556                          is_zero_ether_addr(addr),
6557                          is_broadcast_ether_addr(addr),
6558                          is_multicast_ether_addr(addr));
6559                 return -EINVAL;
6560         }
6561
6562         memset(&req, 0, sizeof(req));
6563
6564         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6565                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6566
6567         req.egress_port = cpu_to_le16(egress_port);
6568
6569         hclge_prepare_mac_addr(&req, addr, false);
6570
6571         /* Lookup the mac address in the mac_vlan table, and add
6572          * it if the entry does not exist. Repeated unicast entries
6573          * are not allowed in the mac vlan table.
6574          */
6575         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6576         if (ret == -ENOENT) {
6577                 if (!hclge_is_umv_space_full(vport)) {
6578                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6579                         if (!ret)
6580                                 hclge_update_umv_space(vport, false);
6581                         return ret;
6582                 }
6583
6584                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6585                         hdev->priv_umv_size);
6586
6587                 return -ENOSPC;
6588         }
6589
6590         /* check if we just hit the duplicate */
6591         if (!ret) {
6592                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6593                          vport->vport_id, addr);
6594                 return 0;
6595         }
6596
6597         dev_err(&hdev->pdev->dev,
6598                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6599                 addr);
6600
6601         return ret;
6602 }
6603
6604 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6605                             const unsigned char *addr)
6606 {
6607         struct hclge_vport *vport = hclge_get_vport(handle);
6608
6609         return hclge_rm_uc_addr_common(vport, addr);
6610 }
6611
6612 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6613                             const unsigned char *addr)
6614 {
6615         struct hclge_dev *hdev = vport->back;
6616         struct hclge_mac_vlan_tbl_entry_cmd req;
6617         int ret;
6618
6619         /* mac addr check */
6620         if (is_zero_ether_addr(addr) ||
6621             is_broadcast_ether_addr(addr) ||
6622             is_multicast_ether_addr(addr)) {
6623                 dev_dbg(&hdev->pdev->dev,
6624                         "Remove mac err! invalid mac:%pM.\n",
6625                          addr);
6626                 return -EINVAL;
6627         }
6628
6629         memset(&req, 0, sizeof(req));
6630         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6631         hclge_prepare_mac_addr(&req, addr, false);
6632         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6633         if (!ret)
6634                 hclge_update_umv_space(vport, true);
6635
6636         return ret;
6637 }
6638
6639 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6640                              const unsigned char *addr)
6641 {
6642         struct hclge_vport *vport = hclge_get_vport(handle);
6643
6644         return hclge_add_mc_addr_common(vport, addr);
6645 }
6646
6647 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6648                              const unsigned char *addr)
6649 {
6650         struct hclge_dev *hdev = vport->back;
6651         struct hclge_mac_vlan_tbl_entry_cmd req;
6652         struct hclge_desc desc[3];
6653         int status;
6654
6655         /* mac addr check */
6656         if (!is_multicast_ether_addr(addr)) {
6657                 dev_err(&hdev->pdev->dev,
6658                         "Add mc mac err! invalid mac:%pM.\n",
6659                          addr);
6660                 return -EINVAL;
6661         }
6662         memset(&req, 0, sizeof(req));
6663         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6664         hclge_prepare_mac_addr(&req, addr, true);
6665         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6666         if (!status) {
6667                 /* This mac addr exists, update the VFID for it */
6668                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6669                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6670         } else {
6671                 /* This mac addr does not exist, add a new entry for it */
6672                 memset(desc[0].data, 0, sizeof(desc[0].data));
6673                 memset(desc[1].data, 0, sizeof(desc[1].data));
6674                 memset(desc[2].data, 0, sizeof(desc[2].data));
6675                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6676                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6677         }
6678
6679         if (status == -ENOSPC)
6680                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6681
6682         return status;
6683 }
6684
6685 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6686                             const unsigned char *addr)
6687 {
6688         struct hclge_vport *vport = hclge_get_vport(handle);
6689
6690         return hclge_rm_mc_addr_common(vport, addr);
6691 }
6692
6693 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6694                             const unsigned char *addr)
6695 {
6696         struct hclge_dev *hdev = vport->back;
6697         struct hclge_mac_vlan_tbl_entry_cmd req;
6698         enum hclge_cmd_status status;
6699         struct hclge_desc desc[3];
6700
6701         /* mac addr check */
6702         if (!is_multicast_ether_addr(addr)) {
6703                 dev_dbg(&hdev->pdev->dev,
6704                         "Remove mc mac err! invalid mac:%pM.\n",
6705                          addr);
6706                 return -EINVAL;
6707         }
6708
6709         memset(&req, 0, sizeof(req));
6710         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6711         hclge_prepare_mac_addr(&req, addr, true);
6712         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6713         if (!status) {
6714                 /* This mac addr exists, remove this handle's VFID for it */
6715                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6716
6717                 if (hclge_is_all_function_id_zero(desc))
6718                         /* All the vfids are zero, so delete this entry */
6719                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6720                 else
6721                         /* Not all the vfids are zero, update the vfids */
6722                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6723
6724         } else {
6725                 /* Maybe this mac address is in mta table, but it cannot be
6726                  * deleted here because an entry of mta represents an address
6727                  * range rather than a specific address. The delete action on
6728                  * all entries will take effect in update_mta_status called by
6729                  * hns3_nic_set_rx_mode.
6730                  */
6731                 status = 0;
6732         }
6733
6734         return status;
6735 }
6736
6737 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6738                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6739 {
6740         struct hclge_vport_mac_addr_cfg *mac_cfg;
6741         struct list_head *list;
6742
6743         if (!vport->vport_id)
6744                 return;
6745
6746         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6747         if (!mac_cfg)
6748                 return;
6749
6750         mac_cfg->hd_tbl_status = true;
6751         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6752
6753         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6754                &vport->uc_mac_list : &vport->mc_mac_list;
6755
6756         list_add_tail(&mac_cfg->node, list);
6757 }
6758
6759 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6760                               bool is_write_tbl,
6761                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6762 {
6763         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6764         struct list_head *list;
6765         bool uc_flag, mc_flag;
6766
6767         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6768                &vport->uc_mac_list : &vport->mc_mac_list;
6769
6770         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6771         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6772
6773         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6774                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6775                         if (uc_flag && mac_cfg->hd_tbl_status)
6776                                 hclge_rm_uc_addr_common(vport, mac_addr);
6777
6778                         if (mc_flag && mac_cfg->hd_tbl_status)
6779                                 hclge_rm_mc_addr_common(vport, mac_addr);
6780
6781                         list_del(&mac_cfg->node);
6782                         kfree(mac_cfg);
6783                         break;
6784                 }
6785         }
6786 }
6787
6788 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6789                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6790 {
6791         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6792         struct list_head *list;
6793
6794         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6795                &vport->uc_mac_list : &vport->mc_mac_list;
6796
6797         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6798                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6799                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6800
6801                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6802                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6803
6804                 mac_cfg->hd_tbl_status = false;
6805                 if (is_del_list) {
6806                         list_del(&mac_cfg->node);
6807                         kfree(mac_cfg);
6808                 }
6809         }
6810 }
6811
6812 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6813 {
6814         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6815         struct hclge_vport *vport;
6816         int i;
6817
6818         mutex_lock(&hdev->vport_cfg_mutex);
6819         for (i = 0; i < hdev->num_alloc_vport; i++) {
6820                 vport = &hdev->vport[i];
6821                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6822                         list_del(&mac->node);
6823                         kfree(mac);
6824                 }
6825
6826                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6827                         list_del(&mac->node);
6828                         kfree(mac);
6829                 }
6830         }
6831         mutex_unlock(&hdev->vport_cfg_mutex);
6832 }
6833
6834 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6835                                               u16 cmdq_resp, u8 resp_code)
6836 {
6837 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6838 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6839 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6840 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6841
6842         int return_status;
6843
6844         if (cmdq_resp) {
6845                 dev_err(&hdev->pdev->dev,
6846                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6847                         cmdq_resp);
6848                 return -EIO;
6849         }
6850
6851         switch (resp_code) {
6852         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6853         case HCLGE_ETHERTYPE_ALREADY_ADD:
6854                 return_status = 0;
6855                 break;
6856         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6857                 dev_err(&hdev->pdev->dev,
6858                         "add mac ethertype failed for manager table overflow.\n");
6859                 return_status = -EIO;
6860                 break;
6861         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6862                 dev_err(&hdev->pdev->dev,
6863                         "add mac ethertype failed for key conflict.\n");
6864                 return_status = -EIO;
6865                 break;
6866         default:
6867                 dev_err(&hdev->pdev->dev,
6868                         "add mac ethertype failed for undefined, code=%d.\n",
6869                         resp_code);
6870                 return_status = -EIO;
6871         }
6872
6873         return return_status;
6874 }
6875
6876 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6877                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6878 {
6879         struct hclge_desc desc;
6880         u8 resp_code;
6881         u16 retval;
6882         int ret;
6883
6884         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6885         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6886
6887         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6888         if (ret) {
6889                 dev_err(&hdev->pdev->dev,
6890                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6891                         ret);
6892                 return ret;
6893         }
6894
6895         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6896         retval = le16_to_cpu(desc.retval);
6897
6898         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6899 }
6900
6901 static int init_mgr_tbl(struct hclge_dev *hdev)
6902 {
6903         int ret;
6904         int i;
6905
6906         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6907                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6908                 if (ret) {
6909                         dev_err(&hdev->pdev->dev,
6910                                 "add mac ethertype failed, ret =%d.\n",
6911                                 ret);
6912                         return ret;
6913                 }
6914         }
6915
6916         return 0;
6917 }
6918
6919 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6920 {
6921         struct hclge_vport *vport = hclge_get_vport(handle);
6922         struct hclge_dev *hdev = vport->back;
6923
6924         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6925 }
6926
6927 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6928                               bool is_first)
6929 {
6930         const unsigned char *new_addr = (const unsigned char *)p;
6931         struct hclge_vport *vport = hclge_get_vport(handle);
6932         struct hclge_dev *hdev = vport->back;
6933         int ret;
6934
6935         /* mac addr check */
6936         if (is_zero_ether_addr(new_addr) ||
6937             is_broadcast_ether_addr(new_addr) ||
6938             is_multicast_ether_addr(new_addr)) {
6939                 dev_err(&hdev->pdev->dev,
6940                         "Change uc mac err! invalid mac:%pM.\n",
6941                          new_addr);
6942                 return -EINVAL;
6943         }
6944
6945         if ((!is_first || is_kdump_kernel()) &&
6946             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6947                 dev_warn(&hdev->pdev->dev,
6948                          "remove old uc mac address fail.\n");
6949
6950         ret = hclge_add_uc_addr(handle, new_addr);
6951         if (ret) {
6952                 dev_err(&hdev->pdev->dev,
6953                         "add uc mac address fail, ret =%d.\n",
6954                         ret);
6955
6956                 if (!is_first &&
6957                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6958                         dev_err(&hdev->pdev->dev,
6959                                 "restore uc mac address fail.\n");
6960
6961                 return -EIO;
6962         }
6963
6964         ret = hclge_pause_addr_cfg(hdev, new_addr);
6965         if (ret) {
6966                 dev_err(&hdev->pdev->dev,
6967                         "configure mac pause address fail, ret =%d.\n",
6968                         ret);
6969                 return -EIO;
6970         }
6971
6972         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6973
6974         return 0;
6975 }
6976
6977 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6978                           int cmd)
6979 {
6980         struct hclge_vport *vport = hclge_get_vport(handle);
6981         struct hclge_dev *hdev = vport->back;
6982
6983         if (!hdev->hw.mac.phydev)
6984                 return -EOPNOTSUPP;
6985
6986         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6987 }
6988
6989 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6990                                       u8 fe_type, bool filter_en, u8 vf_id)
6991 {
6992         struct hclge_vlan_filter_ctrl_cmd *req;
6993         struct hclge_desc desc;
6994         int ret;
6995
6996         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6997
6998         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6999         req->vlan_type = vlan_type;
7000         req->vlan_fe = filter_en ? fe_type : 0;
7001         req->vf_id = vf_id;
7002
7003         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7004         if (ret)
7005                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7006                         ret);
7007
7008         return ret;
7009 }
7010
7011 #define HCLGE_FILTER_TYPE_VF            0
7012 #define HCLGE_FILTER_TYPE_PORT          1
7013 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7014 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7015 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7016 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7017 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7018 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7019                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7020 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7021                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7022
7023 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7024 {
7025         struct hclge_vport *vport = hclge_get_vport(handle);
7026         struct hclge_dev *hdev = vport->back;
7027
7028         if (hdev->pdev->revision >= 0x21) {
7029                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7030                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7031                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7032                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7033         } else {
7034                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7035                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7036                                            0);
7037         }
7038         if (enable)
7039                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7040         else
7041                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7042 }
7043
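/* Program the VF VLAN filter for a single vport. The per-VF bitmap spans
 * two chained descriptors of HCLGE_MAX_VF_BYTES each, so the target VF bit
 * is placed in whichever descriptor covers it. The response code in the
 * first descriptor reports success, a full VF VLAN table on add, or a
 * missing entry on delete.
 */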
7044 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7045                                     bool is_kill, u16 vlan, u8 qos,
7046                                     __be16 proto)
7047 {
7048 #define HCLGE_MAX_VF_BYTES  16
7049         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7050         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7051         struct hclge_desc desc[2];
7052         u8 vf_byte_val;
7053         u8 vf_byte_off;
7054         int ret;
7055
7056         hclge_cmd_setup_basic_desc(&desc[0],
7057                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7058         hclge_cmd_setup_basic_desc(&desc[1],
7059                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7060
7061         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7062
7063         vf_byte_off = vfid / 8;
7064         vf_byte_val = 1 << (vfid % 8);
7065
7066         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7067         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7068
7069         req0->vlan_id  = cpu_to_le16(vlan);
7070         req0->vlan_cfg = is_kill;
7071
7072         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7073                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7074         else
7075                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7076
7077         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7078         if (ret) {
7079                 dev_err(&hdev->pdev->dev,
7080                         "Send vf vlan command fail, ret =%d.\n",
7081                         ret);
7082                 return ret;
7083         }
7084
7085         if (!is_kill) {
7086 #define HCLGE_VF_VLAN_NO_ENTRY  2
7087                 if (!req0->resp_code || req0->resp_code == 1)
7088                         return 0;
7089
7090                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7091                         dev_warn(&hdev->pdev->dev,
7092                                  "vf vlan table is full, vf vlan filter is disabled\n");
7093                         return 0;
7094                 }
7095
7096                 dev_err(&hdev->pdev->dev,
7097                         "Add vf vlan filter fail, ret =%d.\n",
7098                         req0->resp_code);
7099         } else {
7100 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7101                 if (!req0->resp_code)
7102                         return 0;
7103
7104                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7105                         dev_warn(&hdev->pdev->dev,
7106                                  "vlan %d filter is not in vf vlan table\n",
7107                                  vlan);
7108                         return 0;
7109                 }
7110
7111                 dev_err(&hdev->pdev->dev,
7112                         "Kill vf vlan filter fail, ret =%d.\n",
7113                         req0->resp_code);
7114         }
7115
7116         return -EIO;
7117 }
7118
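/* Update the port (PF) VLAN filter table. The VLAN id is addressed as a
 * 160-entry block (vlan_offset), a byte within that block and a bit within
 * the byte; vlan_cfg carries the is_kill flag.
 */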
7119 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7120                                       u16 vlan_id, bool is_kill)
7121 {
7122         struct hclge_vlan_filter_pf_cfg_cmd *req;
7123         struct hclge_desc desc;
7124         u8 vlan_offset_byte_val;
7125         u8 vlan_offset_byte;
7126         u8 vlan_offset_160;
7127         int ret;
7128
7129         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7130
7131         vlan_offset_160 = vlan_id / 160;
7132         vlan_offset_byte = (vlan_id % 160) / 8;
7133         vlan_offset_byte_val = 1 << (vlan_id % 8);
7134
7135         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7136         req->vlan_offset = vlan_offset_160;
7137         req->vlan_cfg = is_kill;
7138         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7139
7140         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7141         if (ret)
7142                 dev_err(&hdev->pdev->dev,
7143                         "failed to send port vlan command, ret =%d.\n", ret);
7144         return ret;
7145 }
7146
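/* Apply a VLAN filter change for one vport. The VF VLAN filter is always
 * updated, while hdev->vlan_table[] tracks which vports use each VLAN so
 * the port-level filter is only written when the first vport adds the VLAN
 * or the last vport removes it. Killing VLAN 0 is a no-op.
 */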
7147 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7148                                     u16 vport_id, u16 vlan_id, u8 qos,
7149                                     bool is_kill)
7150 {
7151         u16 vport_idx, vport_num = 0;
7152         int ret;
7153
7154         if (is_kill && !vlan_id)
7155                 return 0;
7156
7157         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7158                                        0, proto);
7159         if (ret) {
7160                 dev_err(&hdev->pdev->dev,
7161                         "Set %d vport vlan filter config fail, ret =%d.\n",
7162                         vport_id, ret);
7163                 return ret;
7164         }
7165
7166         /* vlan 0 may be added twice when 8021q module is enabled */
7167         if (!is_kill && !vlan_id &&
7168             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7169                 return 0;
7170
7171         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7172                 dev_err(&hdev->pdev->dev,
7173                         "Add port vlan failed, vport %d is already in vlan %d\n",
7174                         vport_id, vlan_id);
7175                 return -EINVAL;
7176         }
7177
7178         if (is_kill &&
7179             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7180                 dev_err(&hdev->pdev->dev,
7181                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7182                         vport_id, vlan_id);
7183                 return -EINVAL;
7184         }
7185
7186         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7187                 vport_num++;
7188
7189         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7190                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7191                                                  is_kill);
7192
7193         return ret;
7194 }
7195
7196 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7197 {
7198         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7199         struct hclge_vport_vtag_tx_cfg_cmd *req;
7200         struct hclge_dev *hdev = vport->back;
7201         struct hclge_desc desc;
7202         int status;
7203
7204         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7205
7206         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7207         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7208         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7209         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7210                       vcfg->accept_tag1 ? 1 : 0);
7211         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7212                       vcfg->accept_untag1 ? 1 : 0);
7213         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7214                       vcfg->accept_tag2 ? 1 : 0);
7215         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7216                       vcfg->accept_untag2 ? 1 : 0);
7217         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7218                       vcfg->insert_tag1_en ? 1 : 0);
7219         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7220                       vcfg->insert_tag2_en ? 1 : 0);
7221         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7222
7223         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7224         req->vf_bitmap[req->vf_offset] =
7225                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7226
7227         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7228         if (status)
7229                 dev_err(&hdev->pdev->dev,
7230                         "Send port txvlan cfg command fail, ret =%d\n",
7231                         status);
7232
7233         return status;
7234 }
7235
7236 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7237 {
7238         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7239         struct hclge_vport_vtag_rx_cfg_cmd *req;
7240         struct hclge_dev *hdev = vport->back;
7241         struct hclge_desc desc;
7242         int status;
7243
7244         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7245
7246         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7247         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7248                       vcfg->strip_tag1_en ? 1 : 0);
7249         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7250                       vcfg->strip_tag2_en ? 1 : 0);
7251         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7252                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7253         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7254                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7255
7256         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7257         req->vf_bitmap[req->vf_offset] =
7258                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7259
7260         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7261         if (status)
7262                 dev_err(&hdev->pdev->dev,
7263                         "Send port rxvlan cfg command fail, ret =%d\n",
7264                         status);
7265
7266         return status;
7267 }
7268
7269 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7270                                   u16 port_base_vlan_state,
7271                                   u16 vlan_tag)
7272 {
7273         int ret;
7274
7275         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276                 vport->txvlan_cfg.accept_tag1 = true;
7277                 vport->txvlan_cfg.insert_tag1_en = false;
7278                 vport->txvlan_cfg.default_tag1 = 0;
7279         } else {
7280                 vport->txvlan_cfg.accept_tag1 = false;
7281                 vport->txvlan_cfg.insert_tag1_en = true;
7282                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7283         }
7284
7285         vport->txvlan_cfg.accept_untag1 = true;
7286
7287         /* accept_tag2 and accept_untag2 are not supported on
7288          * pdev revision(0x20); newer revisions support them, but
7289          * these two fields cannot be configured by the user.
7290          */
7291         vport->txvlan_cfg.accept_tag2 = true;
7292         vport->txvlan_cfg.accept_untag2 = true;
7293         vport->txvlan_cfg.insert_tag2_en = false;
7294         vport->txvlan_cfg.default_tag2 = 0;
7295
7296         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7297                 vport->rxvlan_cfg.strip_tag1_en = false;
7298                 vport->rxvlan_cfg.strip_tag2_en =
7299                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7300         } else {
7301                 vport->rxvlan_cfg.strip_tag1_en =
7302                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7303                 vport->rxvlan_cfg.strip_tag2_en = true;
7304         }
7305         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7306         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7307
7308         ret = hclge_set_vlan_tx_offload_cfg(vport);
7309         if (ret)
7310                 return ret;
7311
7312         return hclge_set_vlan_rx_offload_cfg(vport);
7313 }
7314
7315 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7316 {
7317         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7318         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7319         struct hclge_desc desc;
7320         int status;
7321
7322         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7323         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7324         rx_req->ot_fst_vlan_type =
7325                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7326         rx_req->ot_sec_vlan_type =
7327                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7328         rx_req->in_fst_vlan_type =
7329                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7330         rx_req->in_sec_vlan_type =
7331                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7332
7333         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7334         if (status) {
7335                 dev_err(&hdev->pdev->dev,
7336                         "Send rxvlan protocol type command fail, ret =%d\n",
7337                         status);
7338                 return status;
7339         }
7340
7341         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7342
7343         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7344         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7345         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7346
7347         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7348         if (status)
7349                 dev_err(&hdev->pdev->dev,
7350                         "Send txvlan protocol type command fail, ret =%d\n",
7351                         status);
7352
7353         return status;
7354 }
7355
7356 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7357 {
7358 #define HCLGE_DEF_VLAN_TYPE             0x8100
7359
7360         struct hnae3_handle *handle = &hdev->vport[0].nic;
7361         struct hclge_vport *vport;
7362         int ret;
7363         int i;
7364
7365         if (hdev->pdev->revision >= 0x21) {
7366                 /* for revision 0x21, vf vlan filter is per function */
7367                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7368                         vport = &hdev->vport[i];
7369                         ret = hclge_set_vlan_filter_ctrl(hdev,
7370                                                          HCLGE_FILTER_TYPE_VF,
7371                                                          HCLGE_FILTER_FE_EGRESS,
7372                                                          true,
7373                                                          vport->vport_id);
7374                         if (ret)
7375                                 return ret;
7376                 }
7377
7378                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7379                                                  HCLGE_FILTER_FE_INGRESS, true,
7380                                                  0);
7381                 if (ret)
7382                         return ret;
7383         } else {
7384                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7385                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7386                                                  true, 0);
7387                 if (ret)
7388                         return ret;
7389         }
7390
7391         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7392
7393         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7394         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7395         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7396         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7397         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7399
7400         ret = hclge_set_vlan_protocol_type(hdev);
7401         if (ret)
7402                 return ret;
7403
7404         for (i = 0; i < hdev->num_alloc_vport; i++) {
7405                 u16 vlan_tag;
7406
7407                 vport = &hdev->vport[i];
7408                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7409
7410                 ret = hclge_vlan_offload_cfg(vport,
7411                                              vport->port_base_vlan_cfg.state,
7412                                              vlan_tag);
7413                 if (ret)
7414                         return ret;
7415         }
7416
7417         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7418 }
7419
7420 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7421                                        bool writen_to_tbl)
7422 {
7423         struct hclge_vport_vlan_cfg *vlan;
7424
7425         /* vlan 0 is reserved */
7426         if (!vlan_id)
7427                 return;
7428
7429         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7430         if (!vlan)
7431                 return;
7432
7433         vlan->hd_tbl_status = writen_to_tbl;
7434         vlan->vlan_id = vlan_id;
7435
7436         list_add_tail(&vlan->node, &vport->vlan_list);
7437 }
7438
7439 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7440 {
7441         struct hclge_vport_vlan_cfg *vlan, *tmp;
7442         struct hclge_dev *hdev = vport->back;
7443         int ret;
7444
7445         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7446                 if (!vlan->hd_tbl_status) {
7447                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7448                                                        vport->vport_id,
7449                                                        vlan->vlan_id, 0, false);
7450                         if (ret) {
7451                                 dev_err(&hdev->pdev->dev,
7452                                         "restore vport vlan list failed, ret=%d\n",
7453                                         ret);
7454                                 return ret;
7455                         }
7456                 }
7457                 vlan->hd_tbl_status = true;
7458         }
7459
7460         return 0;
7461 }
7462
7463 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7464                                       bool is_write_tbl)
7465 {
7466         struct hclge_vport_vlan_cfg *vlan, *tmp;
7467         struct hclge_dev *hdev = vport->back;
7468
7469         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7470                 if (vlan->vlan_id == vlan_id) {
7471                         if (is_write_tbl && vlan->hd_tbl_status)
7472                                 hclge_set_vlan_filter_hw(hdev,
7473                                                          htons(ETH_P_8021Q),
7474                                                          vport->vport_id,
7475                                                          vlan_id, 0,
7476                                                          true);
7477
7478                         list_del(&vlan->node);
7479                         kfree(vlan);
7480                         break;
7481                 }
7482         }
7483 }
7484
7485 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7486 {
7487         struct hclge_vport_vlan_cfg *vlan, *tmp;
7488         struct hclge_dev *hdev = vport->back;
7489
7490         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491                 if (vlan->hd_tbl_status)
7492                         hclge_set_vlan_filter_hw(hdev,
7493                                                  htons(ETH_P_8021Q),
7494                                                  vport->vport_id,
7495                                                  vlan->vlan_id, 0,
7496                                                  true);
7497
7498                 vlan->hd_tbl_status = false;
7499                 if (is_del_list) {
7500                         list_del(&vlan->node);
7501                         kfree(vlan);
7502                 }
7503         }
7504 }
7505
7506 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7507 {
7508         struct hclge_vport_vlan_cfg *vlan, *tmp;
7509         struct hclge_vport *vport;
7510         int i;
7511
7512         mutex_lock(&hdev->vport_cfg_mutex);
7513         for (i = 0; i < hdev->num_alloc_vport; i++) {
7514                 vport = &hdev->vport[i];
7515                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7516                         list_del(&vlan->node);
7517                         kfree(vlan);
7518                 }
7519         }
7520         mutex_unlock(&hdev->vport_cfg_mutex);
7521 }
7522
7523 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7524 {
7525         struct hclge_vport *vport = hclge_get_vport(handle);
7526
7527         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7528                 vport->rxvlan_cfg.strip_tag1_en = false;
7529                 vport->rxvlan_cfg.strip_tag2_en = enable;
7530         } else {
7531                 vport->rxvlan_cfg.strip_tag1_en = enable;
7532                 vport->rxvlan_cfg.strip_tag2_en = true;
7533         }
7534         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7535         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7536         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7537
7538         return hclge_set_vlan_rx_offload_cfg(vport);
7539 }
7540
7541 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7542                                             u16 port_base_vlan_state,
7543                                             struct hclge_vlan_info *new_info,
7544                                             struct hclge_vlan_info *old_info)
7545 {
7546         struct hclge_dev *hdev = vport->back;
7547         int ret;
7548
7549         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7550                 hclge_rm_vport_all_vlan_table(vport, false);
7551                 return hclge_set_vlan_filter_hw(hdev,
7552                                                  htons(new_info->vlan_proto),
7553                                                  vport->vport_id,
7554                                                  new_info->vlan_tag,
7555                                                  new_info->qos, false);
7556         }
7557
7558         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7559                                        vport->vport_id, old_info->vlan_tag,
7560                                        old_info->qos, true);
7561         if (ret)
7562                 return ret;
7563
7564         return hclge_add_vport_all_vlan_table(vport);
7565 }
7566
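/* Switch the port based VLAN configuration of a vport. The tx/rx VLAN
 * offload settings are updated first; a MODIFY request then swaps the old
 * hardware filter entry for the new tag, while enable/disable requests go
 * through hclge_update_vlan_filter_entries() and also update the cached
 * port based VLAN state.
 */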
7567 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7568                                     struct hclge_vlan_info *vlan_info)
7569 {
7570         struct hnae3_handle *nic = &vport->nic;
7571         struct hclge_vlan_info *old_vlan_info;
7572         struct hclge_dev *hdev = vport->back;
7573         int ret;
7574
7575         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7576
7577         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7578         if (ret)
7579                 return ret;
7580
7581         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7582                 /* add new VLAN tag */
7583                 ret = hclge_set_vlan_filter_hw(hdev,
7584                                                htons(vlan_info->vlan_proto),
7585                                                vport->vport_id,
7586                                                vlan_info->vlan_tag,
7587                                                vlan_info->qos, false);
7588                 if (ret)
7589                         return ret;
7590
7591                 /* remove old VLAN tag */
7592                 ret = hclge_set_vlan_filter_hw(hdev,
7593                                                htons(old_vlan_info->vlan_proto),
7594                                                vport->vport_id,
7595                                                old_vlan_info->vlan_tag,
7596                                                old_vlan_info->qos, true);
7597                 if (ret)
7598                         return ret;
7599
7600                 goto update;
7601         }
7602
7603         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7604                                                old_vlan_info);
7605         if (ret)
7606                 return ret;
7607
7608         /* update state only when disabling/enabling port based VLAN */
7609         vport->port_base_vlan_cfg.state = state;
7610         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7611                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7612         else
7613                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7614
7615 update:
7616         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7617         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7618         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7619
7620         return 0;
7621 }
7622
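/* Decide how a requested VF VLAN (from hclge_set_vf_vlan_filter) changes
 * the port based VLAN state: no change, enable, disable or modify, based on
 * the current state and whether the requested VLAN is zero or differs from
 * the configured tag.
 */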
7623 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7624                                           enum hnae3_port_base_vlan_state state,
7625                                           u16 vlan)
7626 {
7627         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7628                 if (!vlan)
7629                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7630                 else
7631                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7632         } else {
7633                 if (!vlan)
7634                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7635                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7636                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7637                 else
7638                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7639         }
7640 }
7641
7642 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7643                                     u16 vlan, u8 qos, __be16 proto)
7644 {
7645         struct hclge_vport *vport = hclge_get_vport(handle);
7646         struct hclge_dev *hdev = vport->back;
7647         struct hclge_vlan_info vlan_info;
7648         u16 state;
7649         int ret;
7650
7651         if (hdev->pdev->revision == 0x20)
7652                 return -EOPNOTSUPP;
7653
7654         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7655         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7656                 return -EINVAL;
7657         if (proto != htons(ETH_P_8021Q))
7658                 return -EPROTONOSUPPORT;
7659
7660         vport = &hdev->vport[vfid];
7661         state = hclge_get_port_base_vlan_state(vport,
7662                                                vport->port_base_vlan_cfg.state,
7663                                                vlan);
7664         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7665                 return 0;
7666
7667         vlan_info.vlan_tag = vlan;
7668         vlan_info.qos = qos;
7669         vlan_info.vlan_proto = ntohs(proto);
7670
7671         /* update port based VLAN for PF */
7672         if (!vfid) {
7673                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7674                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7675                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7676
7677                 return ret;
7678         }
7679
7680         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7681                 return hclge_update_port_base_vlan_cfg(vport, state,
7682                                                        &vlan_info);
7683         } else {
7684                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7685                                                         (u8)vfid, state,
7686                                                         vlan, qos,
7687                                                         ntohs(proto));
7688                 return ret;
7689         }
7690 }
7691
7692 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7693                           u16 vlan_id, bool is_kill)
7694 {
7695         struct hclge_vport *vport = hclge_get_vport(handle);
7696         struct hclge_dev *hdev = vport->back;
7697         bool writen_to_tbl = false;
7698         int ret = 0;
7699
7700         /* When port based VLAN is enabled, we use the port based VLAN as the
7701          * VLAN filter entry. In this case, we don't update the VLAN filter
7702          * table when the user adds or removes a VLAN; we just update the
7703          * vport VLAN list. The VLAN ids in the VLAN list won't be written to
7704          * the VLAN filter table until port based VLAN is disabled.
7705          */
7706         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7707                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7708                                                vlan_id, 0, is_kill);
7709                 writen_to_tbl = true;
7710         }
7711
7712         if (ret)
7713                 return ret;
7714
7715         if (is_kill)
7716                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7717         else
7718                 hclge_add_vport_vlan_table(vport, vlan_id,
7719                                            writen_to_tbl);
7720
7721         return 0;
7722 }
7723
7724 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7725 {
7726         struct hclge_config_max_frm_size_cmd *req;
7727         struct hclge_desc desc;
7728
7729         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7730
7731         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7732         req->max_frm_size = cpu_to_le16(new_mps);
7733         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7734
7735         return hclge_cmd_send(&hdev->hw, &desc, 1);
7736 }
7737
7738 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7739 {
7740         struct hclge_vport *vport = hclge_get_vport(handle);
7741
7742         return hclge_set_vport_mtu(vport, new_mtu);
7743 }
7744
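/* Change the MTU of a vport. The MTU is converted to a max frame size
 * (MTU + Ethernet header + FCS + two VLAN tags); a VF's frame size must not
 * exceed the PF's, and the PF's must not drop below any VF's. For the PF,
 * the MAC frame size is reprogrammed and the packet buffers are
 * re-allocated while the client is brought down.
 */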
7745 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7746 {
7747         struct hclge_dev *hdev = vport->back;
7748         int i, max_frm_size, ret = 0;
7749
7750         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7751         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7752             max_frm_size > HCLGE_MAC_MAX_FRAME)
7753                 return -EINVAL;
7754
7755         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7756         mutex_lock(&hdev->vport_lock);
7757         /* VF's mps must fit within hdev->mps */
7758         if (vport->vport_id && max_frm_size > hdev->mps) {
7759                 mutex_unlock(&hdev->vport_lock);
7760                 return -EINVAL;
7761         } else if (vport->vport_id) {
7762                 vport->mps = max_frm_size;
7763                 mutex_unlock(&hdev->vport_lock);
7764                 return 0;
7765         }
7766
7767         /* PF's mps must be greater than VF's mps */
7768         for (i = 1; i < hdev->num_alloc_vport; i++)
7769                 if (max_frm_size < hdev->vport[i].mps) {
7770                         mutex_unlock(&hdev->vport_lock);
7771                         return -EINVAL;
7772                 }
7773
7774         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7775
7776         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7777         if (ret) {
7778                 dev_err(&hdev->pdev->dev,
7779                         "Change mtu fail, ret =%d\n", ret);
7780                 goto out;
7781         }
7782
7783         hdev->mps = max_frm_size;
7784         vport->mps = max_frm_size;
7785
7786         ret = hclge_buffer_alloc(hdev);
7787         if (ret)
7788                 dev_err(&hdev->pdev->dev,
7789                         "Allocate buffer fail, ret =%d\n", ret);
7790
7791 out:
7792         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7793         mutex_unlock(&hdev->vport_lock);
7794         return ret;
7795 }
7796
7797 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7798                                     bool enable)
7799 {
7800         struct hclge_reset_tqp_queue_cmd *req;
7801         struct hclge_desc desc;
7802         int ret;
7803
7804         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7805
7806         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7807         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7808         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7809
7810         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7811         if (ret) {
7812                 dev_err(&hdev->pdev->dev,
7813                         "Send tqp reset cmd error, status =%d\n", ret);
7814                 return ret;
7815         }
7816
7817         return 0;
7818 }
7819
7820 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7821 {
7822         struct hclge_reset_tqp_queue_cmd *req;
7823         struct hclge_desc desc;
7824         int ret;
7825
7826         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7827
7828         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7829         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7830
7831         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7832         if (ret) {
7833                 dev_err(&hdev->pdev->dev,
7834                         "Get reset status error, status =%d\n", ret);
7835                 return ret;
7836         }
7837
7838         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7839 }
7840
7841 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7842 {
7843         struct hnae3_queue *queue;
7844         struct hclge_tqp *tqp;
7845
7846         queue = handle->kinfo.tqp[queue_id];
7847         tqp = container_of(queue, struct hclge_tqp, q);
7848
7849         return tqp->index;
7850 }
7851
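/* Reset a single TQP: disable the queue, assert the per-queue reset, poll
 * the reset status up to HCLGE_TQP_RESET_TRY_TIMES times (20 ms apart) and
 * finally deassert the reset.
 */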
7852 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7853 {
7854         struct hclge_vport *vport = hclge_get_vport(handle);
7855         struct hclge_dev *hdev = vport->back;
7856         int reset_try_times = 0;
7857         int reset_status;
7858         u16 queue_gid;
7859         int ret = 0;
7860
7861         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7862
7863         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7864         if (ret) {
7865                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7866                 return ret;
7867         }
7868
7869         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7870         if (ret) {
7871                 dev_err(&hdev->pdev->dev,
7872                         "Send reset tqp cmd fail, ret = %d\n", ret);
7873                 return ret;
7874         }
7875
7876         reset_try_times = 0;
7877         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7878                 /* Wait for tqp hw reset */
7879                 msleep(20);
7880                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7881                 if (reset_status)
7882                         break;
7883         }
7884
7885         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7886                 dev_err(&hdev->pdev->dev, "Reset TQP failed\n");
7887                 return -ETIME;
7888         }
7889
7890         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7891         if (ret)
7892                 dev_err(&hdev->pdev->dev,
7893                         "Deasserting the soft reset failed, ret = %d\n", ret);
7894
7895         return ret;
7896 }
7897
7898 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7899 {
7900         struct hclge_dev *hdev = vport->back;
7901         int reset_try_times = 0;
7902         int reset_status;
7903         u16 queue_gid;
7904         int ret;
7905
7906         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7907
7908         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7909         if (ret) {
7910                 dev_warn(&hdev->pdev->dev,
7911                          "Send reset tqp cmd fail, ret = %d\n", ret);
7912                 return;
7913         }
7914
7915         reset_try_times = 0;
7916         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7917                 /* Wait for tqp hw reset */
7918                 msleep(20);
7919                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7920                 if (reset_status)
7921                         break;
7922         }
7923
7924         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7925                 dev_warn(&hdev->pdev->dev, "Reset TQP failed\n");
7926                 return;
7927         }
7928
7929         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7930         if (ret)
7931                 dev_warn(&hdev->pdev->dev,
7932                          "Deasserting the soft reset failed, ret = %d\n", ret);
7933 }
7934
7935 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7936 {
7937         struct hclge_vport *vport = hclge_get_vport(handle);
7938         struct hclge_dev *hdev = vport->back;
7939
7940         return hdev->fw_version;
7941 }
7942
7943 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7944 {
7945         struct phy_device *phydev = hdev->hw.mac.phydev;
7946
7947         if (!phydev)
7948                 return;
7949
7950         phy_set_asym_pause(phydev, rx_en, tx_en);
7951 }
7952
7953 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7954 {
7955         int ret;
7956
7957         if (rx_en && tx_en)
7958                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7959         else if (rx_en && !tx_en)
7960                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7961         else if (!rx_en && tx_en)
7962                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7963         else
7964                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7965
7966         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7967                 return 0;
7968
7969         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7970         if (ret) {
7971                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7972                         ret);
7973                 return ret;
7974         }
7975
7976         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7977
7978         return 0;
7979 }
7980
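/* Re-resolve the MAC pause configuration after PHY autonegotiation by
 * combining the local and link-partner pause advertisements with
 * mii_resolve_flowctrl_fdx(); pause is forced off on half duplex.
 */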
7981 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7982 {
7983         struct phy_device *phydev = hdev->hw.mac.phydev;
7984         u16 remote_advertising = 0;
7985         u16 local_advertising = 0;
7986         u32 rx_pause, tx_pause;
7987         u8 flowctl;
7988
7989         if (!phydev->link || !phydev->autoneg)
7990                 return 0;
7991
7992         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7993
7994         if (phydev->pause)
7995                 remote_advertising = LPA_PAUSE_CAP;
7996
7997         if (phydev->asym_pause)
7998                 remote_advertising |= LPA_PAUSE_ASYM;
7999
8000         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8001                                            remote_advertising);
8002         tx_pause = flowctl & FLOW_CTRL_TX;
8003         rx_pause = flowctl & FLOW_CTRL_RX;
8004
8005         if (phydev->duplex == HCLGE_MAC_HALF) {
8006                 tx_pause = 0;
8007                 rx_pause = 0;
8008         }
8009
8010         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8011 }
8012
8013 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8014                                  u32 *rx_en, u32 *tx_en)
8015 {
8016         struct hclge_vport *vport = hclge_get_vport(handle);
8017         struct hclge_dev *hdev = vport->back;
8018
8019         *auto_neg = hclge_get_autoneg(handle);
8020
8021         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8022                 *rx_en = 0;
8023                 *tx_en = 0;
8024                 return;
8025         }
8026
8027         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8028                 *rx_en = 1;
8029                 *tx_en = 0;
8030         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8031                 *tx_en = 1;
8032                 *rx_en = 0;
8033         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8034                 *rx_en = 1;
8035                 *tx_en = 1;
8036         } else {
8037                 *rx_en = 0;
8038                 *tx_en = 0;
8039         }
8040 }
8041
8042 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8043                                 u32 rx_en, u32 tx_en)
8044 {
8045         struct hclge_vport *vport = hclge_get_vport(handle);
8046         struct hclge_dev *hdev = vport->back;
8047         struct phy_device *phydev = hdev->hw.mac.phydev;
8048         u32 fc_autoneg;
8049
8050         fc_autoneg = hclge_get_autoneg(handle);
8051         if (auto_neg != fc_autoneg) {
8052                 dev_info(&hdev->pdev->dev,
8053                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8054                 return -EOPNOTSUPP;
8055         }
8056
8057         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8058                 dev_info(&hdev->pdev->dev,
8059                          "Priority flow control enabled. Cannot set link flow control.\n");
8060                 return -EOPNOTSUPP;
8061         }
8062
8063         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8064
8065         if (!fc_autoneg)
8066                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8067
8068         if (phydev)
8069                 return phy_start_aneg(phydev);
8070
8071         if (hdev->pdev->revision == 0x20)
8072                 return -EOPNOTSUPP;
8073
8074         return hclge_restart_autoneg(handle);
8075 }
8076
8077 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8078                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8079 {
8080         struct hclge_vport *vport = hclge_get_vport(handle);
8081         struct hclge_dev *hdev = vport->back;
8082
8083         if (speed)
8084                 *speed = hdev->hw.mac.speed;
8085         if (duplex)
8086                 *duplex = hdev->hw.mac.duplex;
8087         if (auto_neg)
8088                 *auto_neg = hdev->hw.mac.autoneg;
8089 }
8090
8091 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8092                                  u8 *module_type)
8093 {
8094         struct hclge_vport *vport = hclge_get_vport(handle);
8095         struct hclge_dev *hdev = vport->back;
8096
8097         if (media_type)
8098                 *media_type = hdev->hw.mac.media_type;
8099
8100         if (module_type)
8101                 *module_type = hdev->hw.mac.module_type;
8102 }
8103
8104 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8105                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8106 {
8107         struct hclge_vport *vport = hclge_get_vport(handle);
8108         struct hclge_dev *hdev = vport->back;
8109         struct phy_device *phydev = hdev->hw.mac.phydev;
8110         int mdix_ctrl, mdix, retval, is_resolved;
8111
8112         if (!phydev) {
8113                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8114                 *tp_mdix = ETH_TP_MDI_INVALID;
8115                 return;
8116         }
8117
8118         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8119
8120         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8121         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8122                                     HCLGE_PHY_MDIX_CTRL_S);
8123
8124         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8125         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8126         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8127
8128         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8129
8130         switch (mdix_ctrl) {
8131         case 0x0:
8132                 *tp_mdix_ctrl = ETH_TP_MDI;
8133                 break;
8134         case 0x1:
8135                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8136                 break;
8137         case 0x3:
8138                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8139                 break;
8140         default:
8141                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8142                 break;
8143         }
8144
8145         if (!is_resolved)
8146                 *tp_mdix = ETH_TP_MDI_INVALID;
8147         else if (mdix)
8148                 *tp_mdix = ETH_TP_MDI_X;
8149         else
8150                 *tp_mdix = ETH_TP_MDI;
8151 }
8152
8153 static void hclge_info_show(struct hclge_dev *hdev)
8154 {
8155         struct device *dev = &hdev->pdev->dev;
8156
8157         dev_info(dev, "PF info begin:\n");
8158
8159         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8160         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8161         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8162         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8163         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8164         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8165         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8166         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8167         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8168         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8169         dev_info(dev, "This is %s PF\n",
8170                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8171         dev_info(dev, "DCB %s\n",
8172                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8173         dev_info(dev, "MQPRIO %s\n",
8174                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8175
8176         dev_info(dev, "PF info end.\n");
8177 }
8178
8179 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8180                                           struct hclge_vport *vport)
8181 {
8182         struct hnae3_client *client = vport->nic.client;
8183         struct hclge_dev *hdev = ae_dev->priv;
8184         int ret;
8185
8186         ret = client->ops->init_instance(&vport->nic);
8187         if (ret)
8188                 return ret;
8189
8190         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8191         hnae3_set_client_init_flag(client, ae_dev, 1);
8192
8193         if (netif_msg_drv(&hdev->vport->nic))
8194                 hclge_info_show(hdev);
8195
8196         return 0;
8197 }
8198
8199 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8200                                            struct hclge_vport *vport)
8201 {
8202         struct hnae3_client *client = vport->roce.client;
8203         struct hclge_dev *hdev = ae_dev->priv;
8204         int ret;
8205
8206         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8207             !hdev->nic_client)
8208                 return 0;
8209
8210         client = hdev->roce_client;
8211         ret = hclge_init_roce_base_info(vport);
8212         if (ret)
8213                 return ret;
8214
8215         ret = client->ops->init_instance(&vport->roce);
8216         if (ret)
8217                 return ret;
8218
8219         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8220         hnae3_set_client_init_flag(client, ae_dev, 1);
8221
8222         return 0;
8223 }
8224
8225 static int hclge_init_client_instance(struct hnae3_client *client,
8226                                       struct hnae3_ae_dev *ae_dev)
8227 {
8228         struct hclge_dev *hdev = ae_dev->priv;
8229         struct hclge_vport *vport;
8230         int i, ret;
8231
8232         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8233                 vport = &hdev->vport[i];
8234
8235                 switch (client->type) {
8236                 case HNAE3_CLIENT_KNIC:
8237
8238                         hdev->nic_client = client;
8239                         vport->nic.client = client;
8240                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8241                         if (ret)
8242                                 goto clear_nic;
8243
8244                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8245                         if (ret)
8246                                 goto clear_roce;
8247
8248                         break;
8249                 case HNAE3_CLIENT_UNIC:
8250                         hdev->nic_client = client;
8251                         vport->nic.client = client;
8252
8253                         ret = client->ops->init_instance(&vport->nic);
8254                         if (ret)
8255                                 goto clear_nic;
8256
8257                         hnae3_set_client_init_flag(client, ae_dev, 1);
8258
8259                         break;
8260                 case HNAE3_CLIENT_ROCE:
8261                         if (hnae3_dev_roce_supported(hdev)) {
8262                                 hdev->roce_client = client;
8263                                 vport->roce.client = client;
8264                         }
8265
8266                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8267                         if (ret)
8268                                 goto clear_roce;
8269
8270                         break;
8271                 default:
8272                         return -EINVAL;
8273                 }
8274         }
8275
8276         return 0;
8277
8278 clear_nic:
8279         hdev->nic_client = NULL;
8280         vport->nic.client = NULL;
8281         return ret;
8282 clear_roce:
8283         hdev->roce_client = NULL;
8284         vport->roce.client = NULL;
8285         return ret;
8286 }
8287
8288 static void hclge_uninit_client_instance(struct hnae3_client *client,
8289                                          struct hnae3_ae_dev *ae_dev)
8290 {
8291         struct hclge_dev *hdev = ae_dev->priv;
8292         struct hclge_vport *vport;
8293         int i;
8294
8295         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8296                 vport = &hdev->vport[i];
8297                 if (hdev->roce_client) {
8298                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8299                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8300                                                                 0);
8301                         hdev->roce_client = NULL;
8302                         vport->roce.client = NULL;
8303                 }
8304                 if (client->type == HNAE3_CLIENT_ROCE)
8305                         return;
8306                 if (hdev->nic_client && client->ops->uninit_instance) {
8307                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8308                         client->ops->uninit_instance(&vport->nic, 0);
8309                         hdev->nic_client = NULL;
8310                         vport->nic.client = NULL;
8311                 }
8312         }
8313 }
8314
8315 static int hclge_pci_init(struct hclge_dev *hdev)
8316 {
8317         struct pci_dev *pdev = hdev->pdev;
8318         struct hclge_hw *hw;
8319         int ret;
8320
8321         ret = pci_enable_device(pdev);
8322         if (ret) {
8323                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8324                 return ret;
8325         }
8326
8327         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8328         if (ret) {
8329                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8330                 if (ret) {
8331                         dev_err(&pdev->dev,
8332                                 "can't set consistent PCI DMA mask\n");
8333                         goto err_disable_device;
8334                 }
8335                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8336         }
8337
8338         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8339         if (ret) {
8340                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8341                 goto err_disable_device;
8342         }
8343
8344         pci_set_master(pdev);
8345         hw = &hdev->hw;
8346         hw->io_base = pcim_iomap(pdev, 2, 0);
8347         if (!hw->io_base) {
8348                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8349                 ret = -ENOMEM;
8350                 goto err_clr_master;
8351         }
8352
8353         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8354
8355         return 0;
8356 err_clr_master:
8357         pci_clear_master(pdev);
8358         pci_release_regions(pdev);
8359 err_disable_device:
8360         pci_disable_device(pdev);
8361
8362         return ret;
8363 }
8364
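/* Release everything acquired in hclge_pci_init() plus the IRQ vectors
 * allocated during MSI/MSI-X setup.
 */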
8365 static void hclge_pci_uninit(struct hclge_dev *hdev)
8366 {
8367         struct pci_dev *pdev = hdev->pdev;
8368
8369         pcim_iounmap(pdev, hdev->hw.io_base);
8370         pci_free_irq_vectors(pdev);
8371         pci_clear_master(pdev);
8372         pci_release_mem_regions(pdev);
8373         pci_disable_device(pdev);
8374 }
8375
8376 static void hclge_state_init(struct hclge_dev *hdev)
8377 {
8378         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8379         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8380         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8381         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8382         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8383         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8384 }
8385
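/* Mark the device as down/removing and make sure no timer or deferred
 * work item is still running before teardown continues.
 */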
8386 static void hclge_state_uninit(struct hclge_dev *hdev)
8387 {
8388         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8389         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8390
8391         if (hdev->service_timer.function)
8392                 del_timer_sync(&hdev->service_timer);
8393         if (hdev->reset_timer.function)
8394                 del_timer_sync(&hdev->reset_timer);
8395         if (hdev->service_task.func)
8396                 cancel_work_sync(&hdev->service_task);
8397         if (hdev->rst_service_task.func)
8398                 cancel_work_sync(&hdev->rst_service_task);
8399         if (hdev->mbx_service_task.func)
8400                 cancel_work_sync(&hdev->mbx_service_task);
8401 }
8402
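/* Prepare for an FLR: request a function reset and poll (up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS ms) until the reset path reports
 * the function as down.
 */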
8403 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8404 {
8405 #define HCLGE_FLR_WAIT_MS       100
8406 #define HCLGE_FLR_WAIT_CNT      50
8407         struct hclge_dev *hdev = ae_dev->priv;
8408         int cnt = 0;
8409
8410         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8411         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8412         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8413         hclge_reset_event(hdev->pdev, NULL);
8414
8415         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8416                cnt++ < HCLGE_FLR_WAIT_CNT)
8417                 msleep(HCLGE_FLR_WAIT_MS);
8418
8419         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8420                 dev_err(&hdev->pdev->dev,
8421                         "flr wait down timeout: %d\n", cnt);
8422 }
8423
8424 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8425 {
8426         struct hclge_dev *hdev = ae_dev->priv;
8427
8428         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8429 }
8430
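/* Main PF bring-up: PCI and command queue setup, MSI/MSI-X and the misc
 * IRQ, TQP/vport allocation and mapping, MAC/VLAN/TM/RSS/flow-director
 * configuration, then the service timers, work items and vector0.
 */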
8431 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8432 {
8433         struct pci_dev *pdev = ae_dev->pdev;
8434         struct hclge_dev *hdev;
8435         int ret;
8436
8437         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8438         if (!hdev) {
8439                 ret = -ENOMEM;
8440                 goto out;
8441         }
8442
8443         hdev->pdev = pdev;
8444         hdev->ae_dev = ae_dev;
8445         hdev->reset_type = HNAE3_NONE_RESET;
8446         hdev->reset_level = HNAE3_FUNC_RESET;
8447         ae_dev->priv = hdev;
8448         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8449
8450         mutex_init(&hdev->vport_lock);
8451         mutex_init(&hdev->vport_cfg_mutex);
8452         spin_lock_init(&hdev->fd_rule_lock);
8453
8454         ret = hclge_pci_init(hdev);
8455         if (ret) {
8456                 dev_err(&pdev->dev, "PCI init failed\n");
8457                 goto out;
8458         }
8459
8460         /* Initialize the firmware command queue */
8461         ret = hclge_cmd_queue_init(hdev);
8462         if (ret) {
8463                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8464                 goto err_pci_uninit;
8465         }
8466
8467         /* Initialize the firmware command interface */
8468         ret = hclge_cmd_init(hdev);
8469         if (ret)
8470                 goto err_cmd_uninit;
8471
8472         ret = hclge_get_cap(hdev);
8473         if (ret) {
8474                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8475                         ret);
8476                 goto err_cmd_uninit;
8477         }
8478
8479         ret = hclge_configure(hdev);
8480         if (ret) {
8481                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8482                 goto err_cmd_uninit;
8483         }
8484
8485         ret = hclge_init_msi(hdev);
8486         if (ret) {
8487                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8488                 goto err_cmd_uninit;
8489         }
8490
8491         ret = hclge_misc_irq_init(hdev);
8492         if (ret) {
8493                 dev_err(&pdev->dev,
8494                         "Misc IRQ(vector0) init error, ret = %d.\n",
8495                         ret);
8496                 goto err_msi_uninit;
8497         }
8498
8499         ret = hclge_alloc_tqps(hdev);
8500         if (ret) {
8501                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8502                 goto err_msi_irq_uninit;
8503         }
8504
8505         ret = hclge_alloc_vport(hdev);
8506         if (ret) {
8507                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8508                 goto err_msi_irq_uninit;
8509         }
8510
8511         ret = hclge_map_tqp(hdev);
8512         if (ret) {
8513                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8514                 goto err_msi_irq_uninit;
8515         }
8516
8517         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8518                 ret = hclge_mac_mdio_config(hdev);
8519                 if (ret) {
8520                         dev_err(&hdev->pdev->dev,
8521                                 "mdio config fail ret=%d\n", ret);
8522                         goto err_msi_irq_uninit;
8523                 }
8524         }
8525
8526         ret = hclge_init_umv_space(hdev);
8527         if (ret) {
8528                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8529                 goto err_mdiobus_unreg;
8530         }
8531
8532         ret = hclge_mac_init(hdev);
8533         if (ret) {
8534                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8535                 goto err_mdiobus_unreg;
8536         }
8537
8538         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8539         if (ret) {
8540                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8541                 goto err_mdiobus_unreg;
8542         }
8543
8544         ret = hclge_config_gro(hdev, true);
8545         if (ret)
8546                 goto err_mdiobus_unreg;
8547
8548         ret = hclge_init_vlan_config(hdev);
8549         if (ret) {
8550                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8551                 goto err_mdiobus_unreg;
8552         }
8553
8554         ret = hclge_tm_schd_init(hdev);
8555         if (ret) {
8556                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8557                 goto err_mdiobus_unreg;
8558         }
8559
8560         hclge_rss_init_cfg(hdev);
8561         ret = hclge_rss_init_hw(hdev);
8562         if (ret) {
8563                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8564                 goto err_mdiobus_unreg;
8565         }
8566
8567         ret = init_mgr_tbl(hdev);
8568         if (ret) {
8569                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8570                 goto err_mdiobus_unreg;
8571         }
8572
8573         ret = hclge_init_fd_config(hdev);
8574         if (ret) {
8575                 dev_err(&pdev->dev,
8576                         "fd table init fail, ret=%d\n", ret);
8577                 goto err_mdiobus_unreg;
8578         }
8579
8580         ret = hclge_hw_error_set_state(hdev, true);
8581         if (ret) {
8582                 dev_err(&pdev->dev,
8583                         "fail(%d) to enable hw error interrupts\n", ret);
8584                 goto err_mdiobus_unreg;
8585         }
8586
8587         INIT_KFIFO(hdev->mac_tnl_log);
8588
8589         hclge_dcb_ops_set(hdev);
8590
8591         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8592         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8593         INIT_WORK(&hdev->service_task, hclge_service_task);
8594         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8595         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8596
8597         hclge_clear_all_event_cause(hdev);
8598
8599         /* Enable MISC vector(vector0) */
8600         hclge_enable_vector(&hdev->misc_vector, true);
8601
8602         hclge_state_init(hdev);
8603         hdev->last_reset_time = jiffies;
8604
8605         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8606         return 0;
8607
8608 err_mdiobus_unreg:
8609         if (hdev->hw.mac.phydev)
8610                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8611 err_msi_irq_uninit:
8612         hclge_misc_irq_uninit(hdev);
8613 err_msi_uninit:
8614         pci_free_irq_vectors(pdev);
8615 err_cmd_uninit:
8616         hclge_cmd_uninit(hdev);
8617 err_pci_uninit:
8618         pcim_iounmap(pdev, hdev->hw.io_base);
8619         pci_clear_master(pdev);
8620         pci_release_regions(pdev);
8621         pci_disable_device(pdev);
8622 out:
8623         return ret;
8624 }
8625
8626 static void hclge_stats_clear(struct hclge_dev *hdev)
8627 {
8628         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8629 }
8630
8631 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8632 {
8633         struct hclge_vport *vport = hdev->vport;
8634         int i;
8635
8636         for (i = 0; i < hdev->num_alloc_vport; i++) {
8637                 hclge_vport_stop(vport);
8638                 vport++;
8639         }
8640 }
8641
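/* Re-initialization after a reset: the resources allocated at probe time
 * are kept; only the hardware state (command queue, TQP mapping, MAC,
 * VLAN, TM, RSS, flow director, RAS interrupts) is programmed again.
 */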
8642 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8643 {
8644         struct hclge_dev *hdev = ae_dev->priv;
8645         struct pci_dev *pdev = ae_dev->pdev;
8646         int ret;
8647
8648         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8649
8650         hclge_stats_clear(hdev);
8651         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8652
8653         ret = hclge_cmd_init(hdev);
8654         if (ret) {
8655                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8656                 return ret;
8657         }
8658
8659         ret = hclge_map_tqp(hdev);
8660         if (ret) {
8661                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8662                 return ret;
8663         }
8664
8665         hclge_reset_umv_space(hdev);
8666
8667         ret = hclge_mac_init(hdev);
8668         if (ret) {
8669                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8670                 return ret;
8671         }
8672
8673         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8674         if (ret) {
8675                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8676                 return ret;
8677         }
8678
8679         ret = hclge_config_gro(hdev, true);
8680         if (ret)
8681                 return ret;
8682
8683         ret = hclge_init_vlan_config(hdev);
8684         if (ret) {
8685                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8686                 return ret;
8687         }
8688
8689         ret = hclge_tm_init_hw(hdev, true);
8690         if (ret) {
8691                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8692                 return ret;
8693         }
8694
8695         ret = hclge_rss_init_hw(hdev);
8696         if (ret) {
8697                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8698                 return ret;
8699         }
8700
8701         ret = hclge_init_fd_config(hdev);
8702         if (ret) {
8703                 dev_err(&pdev->dev,
8704                         "fd table init fail, ret=%d\n", ret);
8705                 return ret;
8706         }
8707
8708         /* Re-enable the hw error interrupts because
8709          * the interrupts get disabled on core/global reset.
8710          */
8711         ret = hclge_hw_error_set_state(hdev, true);
8712         if (ret) {
8713                 dev_err(&pdev->dev,
8714                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8715                 return ret;
8716         }
8717
8718         hclge_reset_vport_state(hdev);
8719
8720         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8721                  HCLGE_DRIVER_NAME);
8722
8723         return 0;
8724 }
8725
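/* Full PF teardown: stop timers and work items, quiesce vector0 and the
 * MAC tunnel/RAS interrupts, then release command queue, IRQ and PCI
 * resources.
 */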
8726 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8727 {
8728         struct hclge_dev *hdev = ae_dev->priv;
8729         struct hclge_mac *mac = &hdev->hw.mac;
8730
8731         hclge_state_uninit(hdev);
8732
8733         if (mac->phydev)
8734                 mdiobus_unregister(mac->mdio_bus);
8735
8736         hclge_uninit_umv_space(hdev);
8737
8738         /* Disable MISC vector(vector0) */
8739         hclge_enable_vector(&hdev->misc_vector, false);
8740         synchronize_irq(hdev->misc_vector.vector_irq);
8741
8742         hclge_config_mac_tnl_int(hdev, false);
8743         hclge_hw_error_set_state(hdev, false);
8744         hclge_cmd_uninit(hdev);
8745         hclge_misc_irq_uninit(hdev);
8746         hclge_pci_uninit(hdev);
8747         mutex_destroy(&hdev->vport_lock);
8748         hclge_uninit_vport_mac_table(hdev);
8749         hclge_uninit_vport_vlan_table(hdev);
8750         mutex_destroy(&hdev->vport_cfg_mutex);
8751         ae_dev->priv = NULL;
8752 }
8753
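/* The maximum "combined" channel count exposed via ethtool is bounded by
 * both the hardware RSS size and the TQPs available per TC.
 */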
8754 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8755 {
8756         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8757         struct hclge_vport *vport = hclge_get_vport(handle);
8758         struct hclge_dev *hdev = vport->back;
8759
8760         return min_t(u32, hdev->rss_size_max,
8761                      vport->alloc_tqps / kinfo->num_tc);
8762 }
8763
8764 static void hclge_get_channels(struct hnae3_handle *handle,
8765                                struct ethtool_channels *ch)
8766 {
8767         ch->max_combined = hclge_get_max_channels(handle);
8768         ch->other_count = 1;
8769         ch->max_other = 1;
8770         ch->combined_count = handle->kinfo.rss_size;
8771 }
8772
8773 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8774                                         u16 *alloc_tqps, u16 *max_rss_size)
8775 {
8776         struct hclge_vport *vport = hclge_get_vport(handle);
8777         struct hclge_dev *hdev = vport->back;
8778
8779         *alloc_tqps = vport->alloc_tqps;
8780         *max_rss_size = hdev->rss_size_max;
8781 }
8782
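/* ethtool -L handler: record the requested queue count, remap the vport's
 * TQPs, re-program the RSS TC mode and, unless the user supplied one,
 * rebuild the RSS indirection table for the new rss_size.
 */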
8783 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8784                               bool rxfh_configured)
8785 {
8786         struct hclge_vport *vport = hclge_get_vport(handle);
8787         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8788         struct hclge_dev *hdev = vport->back;
8789         int cur_rss_size = kinfo->rss_size;
8790         int cur_tqps = kinfo->num_tqps;
8791         u16 tc_offset[HCLGE_MAX_TC_NUM];
8792         u16 tc_valid[HCLGE_MAX_TC_NUM];
8793         u16 tc_size[HCLGE_MAX_TC_NUM];
8794         u16 roundup_size;
8795         u32 *rss_indir;
8796         int ret, i;
8797
8798         kinfo->req_rss_size = new_tqps_num;
8799
8800         ret = hclge_tm_vport_map_update(hdev);
8801         if (ret) {
8802                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8803                 return ret;
8804         }
8805
8806         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8807         roundup_size = ilog2(roundup_size);
8808         /* Set the RSS TC mode according to the new RSS size */
8809         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8810                 tc_valid[i] = 0;
8811
8812                 if (!(hdev->hw_tc_map & BIT(i)))
8813                         continue;
8814
8815                 tc_valid[i] = 1;
8816                 tc_size[i] = roundup_size;
8817                 tc_offset[i] = kinfo->rss_size * i;
8818         }
8819         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8820         if (ret)
8821                 return ret;
8822
8823         /* RSS indirection table has been configured by user */
8824         if (rxfh_configured)
8825                 goto out;
8826
8827         /* Reinitialize the RSS indirection table according to the new RSS size */
8828         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8829         if (!rss_indir)
8830                 return -ENOMEM;
8831
8832         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8833                 rss_indir[i] = i % kinfo->rss_size;
8834
8835         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8836         if (ret)
8837                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8838                         ret);
8839
8840         kfree(rss_indir);
8841
8842 out:
8843         if (!ret)
8844                 dev_info(&hdev->pdev->dev,
8845                          "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8846                          cur_rss_size, kinfo->rss_size,
8847                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8848
8849         return ret;
8850 }
8851
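/* Query from firmware how many 32-bit and 64-bit registers are available
 * for dumping.
 */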
8852 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8853                               u32 *regs_num_64_bit)
8854 {
8855         struct hclge_desc desc;
8856         u32 total_num;
8857         int ret;
8858
8859         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8860         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8861         if (ret) {
8862                 dev_err(&hdev->pdev->dev,
8863                         "Query register number cmd failed, ret = %d.\n", ret);
8864                 return ret;
8865         }
8866
8867         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8868         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8869
8870         total_num = *regs_num_32_bit + *regs_num_64_bit;
8871         if (!total_num)
8872                 return -EINVAL;
8873
8874         return 0;
8875 }
8876
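/* Dump the 32-bit registers reported by firmware. The first descriptor
 * returns HCLGE_32_BIT_REG_RTN_DATANUM - 2 values because its header is
 * consumed by the command itself; the remaining descriptors are read as
 * raw data.
 */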
8877 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8878                                  void *data)
8879 {
8880 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8881
8882         struct hclge_desc *desc;
8883         u32 *reg_val = data;
8884         __le32 *desc_data;
8885         int cmd_num;
8886         int i, k, n;
8887         int ret;
8888
8889         if (regs_num == 0)
8890                 return 0;
8891
8892         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8893         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8894         if (!desc)
8895                 return -ENOMEM;
8896
8897         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8898         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8899         if (ret) {
8900                 dev_err(&hdev->pdev->dev,
8901                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8902                 kfree(desc);
8903                 return ret;
8904         }
8905
8906         for (i = 0; i < cmd_num; i++) {
8907                 if (i == 0) {
8908                         desc_data = (__le32 *)(&desc[i].data[0]);
8909                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8910                 } else {
8911                         desc_data = (__le32 *)(&desc[i]);
8912                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8913                 }
8914                 for (k = 0; k < n; k++) {
8915                         *reg_val++ = le32_to_cpu(*desc_data++);
8916
8917                         regs_num--;
8918                         if (!regs_num)
8919                                 break;
8920                 }
8921         }
8922
8923         kfree(desc);
8924         return 0;
8925 }
8926
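/* Same scheme as the 32-bit dump, but with 64-bit values; the first
 * descriptor returns one value fewer than the rest.
 */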
8927 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8928                                  void *data)
8929 {
8930 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8931
8932         struct hclge_desc *desc;
8933         u64 *reg_val = data;
8934         __le64 *desc_data;
8935         int cmd_num;
8936         int i, k, n;
8937         int ret;
8938
8939         if (regs_num == 0)
8940                 return 0;
8941
8942         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8943         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8944         if (!desc)
8945                 return -ENOMEM;
8946
8947         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8948         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8949         if (ret) {
8950                 dev_err(&hdev->pdev->dev,
8951                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8952                 kfree(desc);
8953                 return ret;
8954         }
8955
8956         for (i = 0; i < cmd_num; i++) {
8957                 if (i == 0) {
8958                         desc_data = (__le64 *)(&desc[i].data[0]);
8959                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8960                 } else {
8961                         desc_data = (__le64 *)(&desc[i]);
8962                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8963                 }
8964                 for (k = 0; k < n; k++) {
8965                         *reg_val++ = le64_to_cpu(*desc_data++);
8966
8967                         regs_num--;
8968                         if (!regs_num)
8969                                 break;
8970                 }
8971         }
8972
8973         kfree(desc);
8974         return 0;
8975 }
8976
8977 #define MAX_SEPARATE_NUM        4
8978 #define SEPARATOR_VALUE         0xFFFFFFFF
8979 #define REG_NUM_PER_LINE        4
8980 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8981
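/* ethtool get_regs_len: the directly read register lists (each padded to a
 * full line with separator values) plus the firmware-provided 32-bit and
 * 64-bit dumps.
 */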
8982 static int hclge_get_regs_len(struct hnae3_handle *handle)
8983 {
8984         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8985         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8986         struct hclge_vport *vport = hclge_get_vport(handle);
8987         struct hclge_dev *hdev = vport->back;
8988         u32 regs_num_32_bit, regs_num_64_bit;
8989         int ret;
8990
8991         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8992         if (ret) {
8993                 dev_err(&hdev->pdev->dev,
8994                         "Get register number failed, ret = %d.\n", ret);
8995                 return -EOPNOTSUPP;
8996         }
8997
8998         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8999         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9000         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9001         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9002
9003         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9004                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9005                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9006 }
9007
9008 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9009                            void *data)
9010 {
9011         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9012         struct hclge_vport *vport = hclge_get_vport(handle);
9013         struct hclge_dev *hdev = vport->back;
9014         u32 regs_num_32_bit, regs_num_64_bit;
9015         int i, j, reg_um, separator_num;
9016         u32 *reg = data;
9017         int ret;
9018
9019         *version = hdev->fw_version;
9020
9021         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9022         if (ret) {
9023                 dev_err(&hdev->pdev->dev,
9024                         "Get register number failed, ret = %d.\n", ret);
9025                 return;
9026         }
9027
9028         /* fetch per-PF register values from the PF PCIe register space */
9029         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9030         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9031         for (i = 0; i < reg_um; i++)
9032                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9033         for (i = 0; i < separator_num; i++)
9034                 *reg++ = SEPARATOR_VALUE;
9035
9036         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9037         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9038         for (i = 0; i < reg_um; i++)
9039                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9040         for (i = 0; i < separator_num; i++)
9041                 *reg++ = SEPARATOR_VALUE;
9042
9043         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9044         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9045         for (j = 0; j < kinfo->num_tqps; j++) {
9046                 for (i = 0; i < reg_um; i++)
9047                         *reg++ = hclge_read_dev(&hdev->hw,
9048                                                 ring_reg_addr_list[i] +
9049                                                 0x200 * j);
9050                 for (i = 0; i < separator_num; i++)
9051                         *reg++ = SEPARATOR_VALUE;
9052         }
9053
9054         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9055         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9056         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9057                 for (i = 0; i < reg_um; i++)
9058                         *reg++ = hclge_read_dev(&hdev->hw,
9059                                                 tqp_intr_reg_addr_list[i] +
9060                                                 4 * j);
9061                 for (i = 0; i < separator_num; i++)
9062                         *reg++ = SEPARATOR_VALUE;
9063         }
9064
9065         /* fetch PF common register values from firmware */
9066         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9067         if (ret) {
9068                 dev_err(&hdev->pdev->dev,
9069                         "Get 32 bit register failed, ret = %d.\n", ret);
9070                 return;
9071         }
9072
9073         reg += regs_num_32_bit;
9074         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9075         if (ret)
9076                 dev_err(&hdev->pdev->dev,
9077                         "Get 64 bit register failed, ret = %d.\n", ret);
9078 }
9079
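/* Set the locate LED state through the HCLGE_OPC_LED_STATUS_CFG command. */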
9080 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9081 {
9082         struct hclge_set_led_state_cmd *req;
9083         struct hclge_desc desc;
9084         int ret;
9085
9086         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9087
9088         req = (struct hclge_set_led_state_cmd *)desc.data;
9089         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9090                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9091
9092         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9093         if (ret)
9094                 dev_err(&hdev->pdev->dev,
9095                         "Send set led state cmd error, ret =%d\n", ret);
9096
9097         return ret;
9098 }
9099
9100 enum hclge_led_status {
9101         HCLGE_LED_OFF,
9102         HCLGE_LED_ON,
9103         HCLGE_LED_NO_CHANGE = 0xFF,
9104 };
9105
9106 static int hclge_set_led_id(struct hnae3_handle *handle,
9107                             enum ethtool_phys_id_state status)
9108 {
9109         struct hclge_vport *vport = hclge_get_vport(handle);
9110         struct hclge_dev *hdev = vport->back;
9111
9112         switch (status) {
9113         case ETHTOOL_ID_ACTIVE:
9114                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9115         case ETHTOOL_ID_INACTIVE:
9116                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9117         default:
9118                 return -EINVAL;
9119         }
9120 }
9121
9122 static void hclge_get_link_mode(struct hnae3_handle *handle,
9123                                 unsigned long *supported,
9124                                 unsigned long *advertising)
9125 {
9126         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9127         struct hclge_vport *vport = hclge_get_vport(handle);
9128         struct hclge_dev *hdev = vport->back;
9129         unsigned int idx = 0;
9130
9131         for (; idx < size; idx++) {
9132                 supported[idx] = hdev->hw.mac.supported[idx];
9133                 advertising[idx] = hdev->hw.mac.advertising[idx];
9134         }
9135 }
9136
9137 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9138 {
9139         struct hclge_vport *vport = hclge_get_vport(handle);
9140         struct hclge_dev *hdev = vport->back;
9141
9142         return hclge_config_gro(hdev, enable);
9143 }
9144
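/* Operations exported to the hnae3 framework; registered below through
 * hnae3_register_ae_algo().
 */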
9145 static const struct hnae3_ae_ops hclge_ops = {
9146         .init_ae_dev = hclge_init_ae_dev,
9147         .uninit_ae_dev = hclge_uninit_ae_dev,
9148         .flr_prepare = hclge_flr_prepare,
9149         .flr_done = hclge_flr_done,
9150         .init_client_instance = hclge_init_client_instance,
9151         .uninit_client_instance = hclge_uninit_client_instance,
9152         .map_ring_to_vector = hclge_map_ring_to_vector,
9153         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9154         .get_vector = hclge_get_vector,
9155         .put_vector = hclge_put_vector,
9156         .set_promisc_mode = hclge_set_promisc_mode,
9157         .set_loopback = hclge_set_loopback,
9158         .start = hclge_ae_start,
9159         .stop = hclge_ae_stop,
9160         .client_start = hclge_client_start,
9161         .client_stop = hclge_client_stop,
9162         .get_status = hclge_get_status,
9163         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9164         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9165         .get_media_type = hclge_get_media_type,
9166         .check_port_speed = hclge_check_port_speed,
9167         .get_fec = hclge_get_fec,
9168         .set_fec = hclge_set_fec,
9169         .get_rss_key_size = hclge_get_rss_key_size,
9170         .get_rss_indir_size = hclge_get_rss_indir_size,
9171         .get_rss = hclge_get_rss,
9172         .set_rss = hclge_set_rss,
9173         .set_rss_tuple = hclge_set_rss_tuple,
9174         .get_rss_tuple = hclge_get_rss_tuple,
9175         .get_tc_size = hclge_get_tc_size,
9176         .get_mac_addr = hclge_get_mac_addr,
9177         .set_mac_addr = hclge_set_mac_addr,
9178         .do_ioctl = hclge_do_ioctl,
9179         .add_uc_addr = hclge_add_uc_addr,
9180         .rm_uc_addr = hclge_rm_uc_addr,
9181         .add_mc_addr = hclge_add_mc_addr,
9182         .rm_mc_addr = hclge_rm_mc_addr,
9183         .set_autoneg = hclge_set_autoneg,
9184         .get_autoneg = hclge_get_autoneg,
9185         .restart_autoneg = hclge_restart_autoneg,
9186         .get_pauseparam = hclge_get_pauseparam,
9187         .set_pauseparam = hclge_set_pauseparam,
9188         .set_mtu = hclge_set_mtu,
9189         .reset_queue = hclge_reset_tqp,
9190         .get_stats = hclge_get_stats,
9191         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9192         .update_stats = hclge_update_stats,
9193         .get_strings = hclge_get_strings,
9194         .get_sset_count = hclge_get_sset_count,
9195         .get_fw_version = hclge_get_fw_version,
9196         .get_mdix_mode = hclge_get_mdix_mode,
9197         .enable_vlan_filter = hclge_enable_vlan_filter,
9198         .set_vlan_filter = hclge_set_vlan_filter,
9199         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9200         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9201         .reset_event = hclge_reset_event,
9202         .set_default_reset_request = hclge_set_def_reset_request,
9203         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9204         .set_channels = hclge_set_channels,
9205         .get_channels = hclge_get_channels,
9206         .get_regs_len = hclge_get_regs_len,
9207         .get_regs = hclge_get_regs,
9208         .set_led_id = hclge_set_led_id,
9209         .get_link_mode = hclge_get_link_mode,
9210         .add_fd_entry = hclge_add_fd_entry,
9211         .del_fd_entry = hclge_del_fd_entry,
9212         .del_all_fd_entries = hclge_del_all_fd_entries,
9213         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9214         .get_fd_rule_info = hclge_get_fd_rule_info,
9215         .get_fd_all_rules = hclge_get_all_rules,
9216         .restore_fd_rules = hclge_restore_fd_entries,
9217         .enable_fd = hclge_enable_fd,
9218         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9219         .dbg_run_cmd = hclge_dbg_run_cmd,
9220         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9221         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9222         .ae_dev_resetting = hclge_ae_dev_resetting,
9223         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9224         .set_gro_en = hclge_gro_en,
9225         .get_global_queue_id = hclge_covert_handle_qid_global,
9226         .set_timer_task = hclge_set_timer_task,
9227         .mac_connect_phy = hclge_mac_connect_phy,
9228         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9229 };
9230
9231 static struct hnae3_ae_algo ae_algo = {
9232         .ops = &hclge_ops,
9233         .pdev_id_table = ae_algo_pci_tbl,
9234 };
9235
9236 static int hclge_init(void)
9237 {
9238         pr_info("%s is initializing\n", HCLGE_NAME);
9239
9240         hnae3_register_ae_algo(&ae_algo);
9241
9242         return 0;
9243 }
9244
9245 static void hclge_exit(void)
9246 {
9247         hnae3_unregister_ae_algo(&ae_algo);
9248 }
9249 module_init(hclge_init);
9250 module_exit(hclge_exit);
9251
9252 MODULE_LICENSE("GPL");
9253 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9254 MODULE_DESCRIPTION("HCLGE Driver");
9255 MODULE_VERSION(HCLGE_MOD_VERSION);