net: hns3: delete the redundant user NIC codes
linux-2.6-microblaze.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
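/* MAC manager table entry matching LLDP frames: ethertype ETH_P_LLDP with
 * destination MAC 01:80:c2:00:00:0e (the LLDP nearest-bridge multicast
 * address), decoded from the hi32/lo16 fields below.
 */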
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
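/* Default RSS hash key: the widely used 40-byte Toeplitz key that many NIC
 * drivers also use as their default.
 */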
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
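        /* one descriptor plus ceil((reg_num - 3) / 4) more to cover all
         * reg_num stats registers
         */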
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
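/* Query how many descriptors the firmware needs for a full MAC stats dump.
 * If the query succeeds, use the complete (variable-length) method; if the
 * firmware returns -EOPNOTSUPP, fall back to the fixed 21-descriptor command.
 */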
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
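        /* two stats per TQP: one Tx and one Rx packet counter */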
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only supported in GE mode
624          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
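        /* poll, sleeping 1-2 ms between tries, until the PF state is
         * reported or the retry limit is hit
         */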
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and RoCE vectors;
803                  * NIC vectors are queued before RoCE vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to support all speeds for GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
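        /* param[2] carries the low 32 bits of the MAC address, param[3] the
         * high 16 bits; shift the high part into bits 32-47
         */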
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be fetched
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Length must be in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimum number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Currently does not support non-contiguous tc */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
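             /* map-type bit: 0 = PF queue, 1 = VF queue; the EN bit enables
              * this queue-to-function mapping
              */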
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
1370 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
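             /* rss_size is bounded both by the hardware limit and by the
              * number of TQPs available per TC on this vport
              */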
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
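                     /* vport 0 is the PF's own vport; its queues are mapped
                      * as PF queues, all others as VF queues
                      */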
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         struct hnae3_handle *nic = &vport->nic;
1467         struct hclge_dev *hdev = vport->back;
1468         int ret;
1469
1470         nic->pdev = hdev->pdev;
1471         nic->ae_algo = &ae_algo;
1472         nic->numa_node_mask = hdev->numa_node_mask;
1473
1474         ret = hclge_knic_setup(vport, num_tqps,
1475                                hdev->num_tx_desc, hdev->num_rx_desc);
1476         if (ret)
1477                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1478
1479         return ret;
1480 }
1481
1482 static int hclge_alloc_vport(struct hclge_dev *hdev)
1483 {
1484         struct pci_dev *pdev = hdev->pdev;
1485         struct hclge_vport *vport;
1486         u32 tqp_main_vport;
1487         u32 tqp_per_vport;
1488         int num_vport, i;
1489         int ret;
1490
1491         /* We need to alloc a vport for the main NIC of the PF */
1492         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1493
1494         if (hdev->num_tqps < num_vport) {
1495                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1496                         hdev->num_tqps, num_vport);
1497                 return -EINVAL;
1498         }
1499
1500         /* Alloc the same number of TQPs for every vport */
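             /* vport 0 (the PF's main vport) also takes any remainder TQPs */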
1501         tqp_per_vport = hdev->num_tqps / num_vport;
1502         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1503
1504         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1505                              GFP_KERNEL);
1506         if (!vport)
1507                 return -ENOMEM;
1508
1509         hdev->vport = vport;
1510         hdev->num_alloc_vport = num_vport;
1511
1512         if (IS_ENABLED(CONFIG_PCI_IOV))
1513                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1514
1515         for (i = 0; i < num_vport; i++) {
1516                 vport->back = hdev;
1517                 vport->vport_id = i;
1518                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1519                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1520                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1521                 INIT_LIST_HEAD(&vport->vlan_list);
1522                 INIT_LIST_HEAD(&vport->uc_mac_list);
1523                 INIT_LIST_HEAD(&vport->mc_mac_list);
1524
1525                 if (i == 0)
1526                         ret = hclge_vport_setup(vport, tqp_main_vport);
1527                 else
1528                         ret = hclge_vport_setup(vport, tqp_per_vport);
1529                 if (ret) {
1530                         dev_err(&pdev->dev,
1531                                 "vport setup failed for vport %d, %d\n",
1532                                 i, ret);
1533                         return ret;
1534                 }
1535
1536                 vport++;
1537         }
1538
1539         return 0;
1540 }
1541
1542 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1543                                     struct hclge_pkt_buf_alloc *buf_alloc)
1544 {
1545 /* TX buffer size is allocated in units of 128 bytes */
1546 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1547 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
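     /* e.g. a 2048 byte TX buffer is programmed as (2048 >> 7) | BIT(15) */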
1548         struct hclge_tx_buff_alloc_cmd *req;
1549         struct hclge_desc desc;
1550         int ret;
1551         u8 i;
1552
1553         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1554
1555         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1556         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1557                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1558
1559                 req->tx_pkt_buff[i] =
1560                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1561                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1562         }
1563
1564         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1565         if (ret)
1566                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1567                         ret);
1568
1569         return ret;
1570 }
1571
1572 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1573                                  struct hclge_pkt_buf_alloc *buf_alloc)
1574 {
1575         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1576
1577         if (ret)
1578                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1579
1580         return ret;
1581 }
1582
1583 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1584 {
1585         int i, cnt = 0;
1586
1587         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1588                 if (hdev->hw_tc_map & BIT(i))
1589                         cnt++;
1590         return cnt;
1591 }
1592
1593 /* Get the number of PFC-enabled TCs, which have a private buffer */
1594 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1595                                   struct hclge_pkt_buf_alloc *buf_alloc)
1596 {
1597         struct hclge_priv_buf *priv;
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1601                 priv = &buf_alloc->priv_buf[i];
1602                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1603                     priv->enable)
1604                         cnt++;
1605         }
1606
1607         return cnt;
1608 }
1609
1610 /* Get the number of PFC-disabled TCs, which have a private buffer */
1611 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1612                                      struct hclge_pkt_buf_alloc *buf_alloc)
1613 {
1614         struct hclge_priv_buf *priv;
1615         int i, cnt = 0;
1616
1617         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1618                 priv = &buf_alloc->priv_buf[i];
1619                 if (hdev->hw_tc_map & BIT(i) &&
1620                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1621                     priv->enable)
1622                         cnt++;
1623         }
1624
1625         return cnt;
1626 }
1627
1628 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1629 {
1630         struct hclge_priv_buf *priv;
1631         u32 rx_priv = 0;
1632         int i;
1633
1634         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1635                 priv = &buf_alloc->priv_buf[i];
1636                 if (priv->enable)
1637                         rx_priv += priv->buf_size;
1638         }
1639         return rx_priv;
1640 }
1641
1642 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1643 {
1644         u32 i, total_tx_size = 0;
1645
1646         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1647                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1648
1649         return total_tx_size;
1650 }
1651
1652 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1653                                 struct hclge_pkt_buf_alloc *buf_alloc,
1654                                 u32 rx_all)
1655 {
1656         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1657         u32 tc_num = hclge_get_tc_num(hdev);
1658         u32 shared_buf, aligned_mps;
1659         u32 rx_priv;
1660         int i;
1661
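             /* check whether rx_all can hold the per-TC private buffers plus
              * a large enough shared buffer; if so, also derive the shared
              * buffer size, its high/low waterlines and the TC thresholds
              */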
1662         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1663
1664         if (hnae3_dev_dcb_supported(hdev))
1665                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1666         else
1667                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1668                                         + hdev->dv_buf_size;
1669
1670         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1671         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1672                              HCLGE_BUF_SIZE_UNIT);
1673
1674         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1675         if (rx_all < rx_priv + shared_std)
1676                 return false;
1677
1678         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1679         buf_alloc->s_buf.buf_size = shared_buf;
1680         if (hnae3_dev_dcb_supported(hdev)) {
1681                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1682                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1683                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1684         } else {
1685                 buf_alloc->s_buf.self.high = aligned_mps +
1686                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1687                 buf_alloc->s_buf.self.low = aligned_mps;
1688         }
1689
1690         if (hnae3_dev_dcb_supported(hdev)) {
1691                 if (tc_num)
1692                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1693                 else
1694                         hi_thrd = shared_buf - hdev->dv_buf_size;
1695
1696                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1697                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1698                 lo_thrd = hi_thrd - aligned_mps / 2;
1699         } else {
1700                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1701                 lo_thrd = aligned_mps;
1702         }
1703
1704         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1705                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1706                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1707         }
1708
1709         return true;
1710 }
1711
1712 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1713                                 struct hclge_pkt_buf_alloc *buf_alloc)
1714 {
1715         u32 i, total_size;
1716
1717         total_size = hdev->pkt_buf_size;
1718
1719         /* alloc tx buffer for all enabled tc */
1720         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1721                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1722
1723                 if (hdev->hw_tc_map & BIT(i)) {
1724                         if (total_size < hdev->tx_buf_size)
1725                                 return -ENOMEM;
1726
1727                         priv->tx_buf_size = hdev->tx_buf_size;
1728                 } else {
1729                         priv->tx_buf_size = 0;
1730                 }
1731
1732                 total_size -= priv->tx_buf_size;
1733         }
1734
1735         return 0;
1736 }
1737
1738 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1739                                   struct hclge_pkt_buf_alloc *buf_alloc)
1740 {
1741         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1742         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1743         int i;
1744
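             /* "max" selects the larger preferred waterlines; the caller
              * retries with max == false when the packet buffer is too small
              */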
1745         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1746                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1747
1748                 priv->enable = 0;
1749                 priv->wl.low = 0;
1750                 priv->wl.high = 0;
1751                 priv->buf_size = 0;
1752
1753                 if (!(hdev->hw_tc_map & BIT(i)))
1754                         continue;
1755
1756                 priv->enable = 1;
1757
1758                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1759                         priv->wl.low = max ? aligned_mps : 256;
1760                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1761                                                 HCLGE_BUF_SIZE_UNIT);
1762                 } else {
1763                         priv->wl.low = 0;
1764                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1765                 }
1766
1767                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1768         }
1769
1770         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1771 }
1772
1773 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1774                                           struct hclge_pkt_buf_alloc *buf_alloc)
1775 {
1776         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1777         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1778         int i;
1779
1780         /* let the last one be cleared first */
1781         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1782                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1783
1784                 if (hdev->hw_tc_map & BIT(i) &&
1785                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1786                         /* Clear the no pfc TC private buffer */
1787                         priv->wl.low = 0;
1788                         priv->wl.high = 0;
1789                         priv->buf_size = 0;
1790                         priv->enable = 0;
1791                         no_pfc_priv_num--;
1792                 }
1793
1794                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1795                     no_pfc_priv_num == 0)
1796                         break;
1797         }
1798
1799         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1800 }
1801
1802 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1803                                         struct hclge_pkt_buf_alloc *buf_alloc)
1804 {
1805         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1806         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1807         int i;
1808
1809         /* let the last one be cleared first */
1810         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1811                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1812
1813                 if (hdev->hw_tc_map & BIT(i) &&
1814                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1815                         /* Reduce the number of pfc TC with private buffer */
1816                         priv->wl.low = 0;
1817                         priv->enable = 0;
1818                         priv->wl.high = 0;
1819                         priv->buf_size = 0;
1820                         pfc_priv_num--;
1821                 }
1822
1823                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1824                     pfc_priv_num == 0)
1825                         break;
1826         }
1827
1828         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1829 }
1830
1831 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1832  * @hdev: pointer to struct hclge_dev
1833  * @buf_alloc: pointer to buffer calculation data
1834  * @return: 0: calculation successful, negative: failure
1835  */
1836 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1837                                 struct hclge_pkt_buf_alloc *buf_alloc)
1838 {
1839         /* When DCB is not supported, rx private buffer is not allocated. */
1840         if (!hnae3_dev_dcb_supported(hdev)) {
1841                 u32 rx_all = hdev->pkt_buf_size;
1842
1843                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1844                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1845                         return -ENOMEM;
1846
1847                 return 0;
1848         }
1849
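             /* try successively smaller layouts until the packet buffer fits:
              * full waterlines, reduced waterlines, then dropping the private
              * buffers of non-pfc TCs and finally those of pfc TCs
              */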
1850         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1851                 return 0;
1852
1853         /* try to decrease the buffer size */
1854         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1855                 return 0;
1856
1857         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1858                 return 0;
1859
1860         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1861                 return 0;
1862
1863         return -ENOMEM;
1864 }
1865
1866 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1867                                    struct hclge_pkt_buf_alloc *buf_alloc)
1868 {
1869         struct hclge_rx_priv_buff_cmd *req;
1870         struct hclge_desc desc;
1871         int ret;
1872         int i;
1873
1874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1875         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1876
1877         /* Alloc private buffer TCs */
1878         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1879                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1880
1881                 req->buf_num[i] =
1882                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1883                 req->buf_num[i] |=
1884                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1885         }
1886
1887         req->shared_buf =
1888                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1889                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1890
1891         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1892         if (ret)
1893                 dev_err(&hdev->pdev->dev,
1894                         "rx private buffer alloc cmd failed %d\n", ret);
1895
1896         return ret;
1897 }
1898
1899 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1900                                    struct hclge_pkt_buf_alloc *buf_alloc)
1901 {
1902         struct hclge_rx_priv_wl_buf *req;
1903         struct hclge_priv_buf *priv;
1904         struct hclge_desc desc[2];
1905         int i, j;
1906         int ret;
1907
1908         for (i = 0; i < 2; i++) {
1909                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1910                                            false);
1911                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1912
1913                 /* The first descriptor sets the NEXT bit to 1 */
1914                 if (i == 0)
1915                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1916                 else
1917                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1918
1919                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1920                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1921
1922                         priv = &buf_alloc->priv_buf[idx];
1923                         req->tc_wl[j].high =
1924                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1925                         req->tc_wl[j].high |=
1926                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1927                         req->tc_wl[j].low =
1928                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1929                         req->tc_wl[j].low |=
1930                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1931                 }
1932         }
1933
1934         /* Send 2 descriptors at one time */
1935         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1936         if (ret)
1937                 dev_err(&hdev->pdev->dev,
1938                         "rx private waterline config cmd failed %d\n",
1939                         ret);
1940         return ret;
1941 }
1942
1943 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1944                                     struct hclge_pkt_buf_alloc *buf_alloc)
1945 {
1946         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1947         struct hclge_rx_com_thrd *req;
1948         struct hclge_desc desc[2];
1949         struct hclge_tc_thrd *tc;
1950         int i, j;
1951         int ret;
1952
1953         for (i = 0; i < 2; i++) {
1954                 hclge_cmd_setup_basic_desc(&desc[i],
1955                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1956                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1957
1958                 /* The first descriptor sets the NEXT bit to 1 */
1959                 if (i == 0)
1960                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1961                 else
1962                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1963
1964                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1965                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1966
1967                         req->com_thrd[j].high =
1968                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1969                         req->com_thrd[j].high |=
1970                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1971                         req->com_thrd[j].low =
1972                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1973                         req->com_thrd[j].low |=
1974                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1975                 }
1976         }
1977
1978         /* Send 2 descriptors at one time */
1979         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1980         if (ret)
1981                 dev_err(&hdev->pdev->dev,
1982                         "common threshold config cmd failed %d\n", ret);
1983         return ret;
1984 }
1985
1986 static int hclge_common_wl_config(struct hclge_dev *hdev,
1987                                   struct hclge_pkt_buf_alloc *buf_alloc)
1988 {
1989         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1990         struct hclge_rx_com_wl *req;
1991         struct hclge_desc desc;
1992         int ret;
1993
1994         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1995
1996         req = (struct hclge_rx_com_wl *)desc.data;
1997         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1998         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1999
2000         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2001         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2002
2003         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2004         if (ret)
2005                 dev_err(&hdev->pdev->dev,
2006                         "common waterline config cmd failed %d\n", ret);
2007
2008         return ret;
2009 }
2010
2011 int hclge_buffer_alloc(struct hclge_dev *hdev)
2012 {
2013         struct hclge_pkt_buf_alloc *pkt_buf;
2014         int ret;
2015
2016         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2017         if (!pkt_buf)
2018                 return -ENOMEM;
2019
2020         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2021         if (ret) {
2022                 dev_err(&hdev->pdev->dev,
2023                         "could not calc tx buffer size for all TCs %d\n", ret);
2024                 goto out;
2025         }
2026
2027         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2028         if (ret) {
2029                 dev_err(&hdev->pdev->dev,
2030                         "could not alloc tx buffers %d\n", ret);
2031                 goto out;
2032         }
2033
2034         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2035         if (ret) {
2036                 dev_err(&hdev->pdev->dev,
2037                         "could not calc rx priv buffer size for all TCs %d\n",
2038                         ret);
2039                 goto out;
2040         }
2041
2042         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2043         if (ret) {
2044                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2045                         ret);
2046                 goto out;
2047         }
2048
2049         if (hnae3_dev_dcb_supported(hdev)) {
2050                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2051                 if (ret) {
2052                         dev_err(&hdev->pdev->dev,
2053                                 "could not configure rx private waterline %d\n",
2054                                 ret);
2055                         goto out;
2056                 }
2057
2058                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2059                 if (ret) {
2060                         dev_err(&hdev->pdev->dev,
2061                                 "could not configure common threshold %d\n",
2062                                 ret);
2063                         goto out;
2064                 }
2065         }
2066
2067         ret = hclge_common_wl_config(hdev, pkt_buf);
2068         if (ret)
2069                 dev_err(&hdev->pdev->dev,
2070                         "could not configure common waterline %d\n", ret);
2071
2072 out:
2073         kfree(pkt_buf);
2074         return ret;
2075 }
2076
2077 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2078 {
2079         struct hnae3_handle *roce = &vport->roce;
2080         struct hnae3_handle *nic = &vport->nic;
2081
2082         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2083
2084         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2085             vport->back->num_msi_left == 0)
2086                 return -EINVAL;
2087
2088         roce->rinfo.base_vector = vport->back->roce_base_vector;
2089
2090         roce->rinfo.netdev = nic->kinfo.netdev;
2091         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2092
2093         roce->pdev = nic->pdev;
2094         roce->ae_algo = nic->ae_algo;
2095         roce->numa_node_mask = nic->numa_node_mask;
2096
2097         return 0;
2098 }
2099
2100 static int hclge_init_msi(struct hclge_dev *hdev)
2101 {
2102         struct pci_dev *pdev = hdev->pdev;
2103         int vectors;
2104         int i;
2105
2106         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2107                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2108         if (vectors < 0) {
2109                 dev_err(&pdev->dev,
2110                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2111                         vectors);
2112                 return vectors;
2113         }
2114         if (vectors < hdev->num_msi)
2115                 dev_warn(&hdev->pdev->dev,
2116                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2117                          hdev->num_msi, vectors);
2118
2119         hdev->num_msi = vectors;
2120         hdev->num_msi_left = vectors;
2121         hdev->base_msi_vector = pdev->irq;
2122         hdev->roce_base_vector = hdev->base_msi_vector +
2123                                 hdev->roce_base_msix_offset;
2124
2125         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2126                                            sizeof(u16), GFP_KERNEL);
2127         if (!hdev->vector_status) {
2128                 pci_free_irq_vectors(pdev);
2129                 return -ENOMEM;
2130         }
2131
2132         for (i = 0; i < hdev->num_msi; i++)
2133                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2134
2135         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2136                                         sizeof(int), GFP_KERNEL);
2137         if (!hdev->vector_irq) {
2138                 pci_free_irq_vectors(pdev);
2139                 return -ENOMEM;
2140         }
2141
2142         return 0;
2143 }
2144
2145 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2146 {
2147
2148         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2149                 duplex = HCLGE_MAC_FULL;
2150
2151         return duplex;
2152 }
2153
2154 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2155                                       u8 duplex)
2156 {
2157         struct hclge_config_mac_speed_dup_cmd *req;
2158         struct hclge_desc desc;
2159         int ret;
2160
2161         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2162
2163         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2164
2165         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2166
2167         switch (speed) {
2168         case HCLGE_MAC_SPEED_10M:
2169                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2170                                 HCLGE_CFG_SPEED_S, 6);
2171                 break;
2172         case HCLGE_MAC_SPEED_100M:
2173                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2174                                 HCLGE_CFG_SPEED_S, 7);
2175                 break;
2176         case HCLGE_MAC_SPEED_1G:
2177                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2178                                 HCLGE_CFG_SPEED_S, 0);
2179                 break;
2180         case HCLGE_MAC_SPEED_10G:
2181                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2182                                 HCLGE_CFG_SPEED_S, 1);
2183                 break;
2184         case HCLGE_MAC_SPEED_25G:
2185                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2186                                 HCLGE_CFG_SPEED_S, 2);
2187                 break;
2188         case HCLGE_MAC_SPEED_40G:
2189                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2190                                 HCLGE_CFG_SPEED_S, 3);
2191                 break;
2192         case HCLGE_MAC_SPEED_50G:
2193                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2194                                 HCLGE_CFG_SPEED_S, 4);
2195                 break;
2196         case HCLGE_MAC_SPEED_100G:
2197                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2198                                 HCLGE_CFG_SPEED_S, 5);
2199                 break;
2200         default:
2201                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2202                 return -EINVAL;
2203         }
2204
2205         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2206                       1);
2207
2208         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2209         if (ret) {
2210                 dev_err(&hdev->pdev->dev,
2211                         "mac speed/duplex config cmd failed %d.\n", ret);
2212                 return ret;
2213         }
2214
2215         return 0;
2216 }
2217
2218 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2219 {
2220         int ret;
2221
2222         duplex = hclge_check_speed_dup(duplex, speed);
2223         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2224                 return 0;
2225
2226         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2227         if (ret)
2228                 return ret;
2229
2230         hdev->hw.mac.speed = speed;
2231         hdev->hw.mac.duplex = duplex;
2232
2233         return 0;
2234 }
2235
2236 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2237                                      u8 duplex)
2238 {
2239         struct hclge_vport *vport = hclge_get_vport(handle);
2240         struct hclge_dev *hdev = vport->back;
2241
2242         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2243 }
2244
2245 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2246 {
2247         struct hclge_config_auto_neg_cmd *req;
2248         struct hclge_desc desc;
2249         u32 flag = 0;
2250         int ret;
2251
2252         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2253
2254         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2255         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2256         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2257
2258         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2259         if (ret)
2260                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2261                         ret);
2262
2263         return ret;
2264 }
2265
2266 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2267 {
2268         struct hclge_vport *vport = hclge_get_vport(handle);
2269         struct hclge_dev *hdev = vport->back;
2270
2271         if (!hdev->hw.mac.support_autoneg) {
2272                 if (enable) {
2273                         dev_err(&hdev->pdev->dev,
2274                                 "autoneg is not supported by current port\n");
2275                         return -EOPNOTSUPP;
2276                 } else {
2277                         return 0;
2278                 }
2279         }
2280
2281         return hclge_set_autoneg_en(hdev, enable);
2282 }
2283
2284 static int hclge_get_autoneg(struct hnae3_handle *handle)
2285 {
2286         struct hclge_vport *vport = hclge_get_vport(handle);
2287         struct hclge_dev *hdev = vport->back;
2288         struct phy_device *phydev = hdev->hw.mac.phydev;
2289
2290         if (phydev)
2291                 return phydev->autoneg;
2292
2293         return hdev->hw.mac.autoneg;
2294 }
2295
2296 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2297 {
2298         struct hclge_vport *vport = hclge_get_vport(handle);
2299         struct hclge_dev *hdev = vport->back;
2300         int ret;
2301
2302         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2303
2304         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2305         if (ret)
2306                 return ret;
2307         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2308 }
2309
2310 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2311 {
2312         struct hclge_config_fec_cmd *req;
2313         struct hclge_desc desc;
2314         int ret;
2315
2316         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2317
2318         req = (struct hclge_config_fec_cmd *)desc.data;
2319         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2320                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2321         if (fec_mode & BIT(HNAE3_FEC_RS))
2322                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2323                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2324         if (fec_mode & BIT(HNAE3_FEC_BASER))
2325                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2326                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2327
2328         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2329         if (ret)
2330                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2331
2332         return ret;
2333 }
2334
2335 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2336 {
2337         struct hclge_vport *vport = hclge_get_vport(handle);
2338         struct hclge_dev *hdev = vport->back;
2339         struct hclge_mac *mac = &hdev->hw.mac;
2340         int ret;
2341
2342         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2343                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2344                 return -EINVAL;
2345         }
2346
2347         ret = hclge_set_fec_hw(hdev, fec_mode);
2348         if (ret)
2349                 return ret;
2350
2351         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2352         return 0;
2353 }
2354
2355 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2356                           u8 *fec_mode)
2357 {
2358         struct hclge_vport *vport = hclge_get_vport(handle);
2359         struct hclge_dev *hdev = vport->back;
2360         struct hclge_mac *mac = &hdev->hw.mac;
2361
2362         if (fec_ability)
2363                 *fec_ability = mac->fec_ability;
2364         if (fec_mode)
2365                 *fec_mode = mac->fec_mode;
2366 }
2367
2368 static int hclge_mac_init(struct hclge_dev *hdev)
2369 {
2370         struct hclge_mac *mac = &hdev->hw.mac;
2371         int ret;
2372
2373         hdev->support_sfp_query = true;
2374         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2375         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2376                                          hdev->hw.mac.duplex);
2377         if (ret) {
2378                 dev_err(&hdev->pdev->dev,
2379                         "Config mac speed dup fail ret=%d\n", ret);
2380                 return ret;
2381         }
2382
2383         mac->link = 0;
2384
2385         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2386                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2387                 if (ret) {
2388                         dev_err(&hdev->pdev->dev,
2389                                 "Fec mode init fail, ret = %d\n", ret);
2390                         return ret;
2391                 }
2392         }
2393
2394         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2395         if (ret) {
2396                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2397                 return ret;
2398         }
2399
2400         ret = hclge_buffer_alloc(hdev);
2401         if (ret)
2402                 dev_err(&hdev->pdev->dev,
2403                         "allocate buffer fail, ret=%d\n", ret);
2404
2405         return ret;
2406 }
2407
2408 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2409 {
2410         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2411             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2412                 schedule_work(&hdev->mbx_service_task);
2413 }
2414
2415 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2416 {
2417         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2418             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2419                 schedule_work(&hdev->rst_service_task);
2420 }
2421
2422 static void hclge_task_schedule(struct hclge_dev *hdev)
2423 {
2424         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2425             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2426             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2427                 (void)schedule_work(&hdev->service_task);
2428 }
2429
2430 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2431 {
2432         struct hclge_link_status_cmd *req;
2433         struct hclge_desc desc;
2434         int link_status;
2435         int ret;
2436
2437         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2438         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2439         if (ret) {
2440                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2441                         ret);
2442                 return ret;
2443         }
2444
2445         req = (struct hclge_link_status_cmd *)desc.data;
2446         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2447
2448         return !!link_status;
2449 }
2450
2451 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2452 {
2453         int mac_state;
2454         int link_stat;
2455
2456         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2457                 return 0;
2458
2459         mac_state = hclge_get_mac_link_status(hdev);
2460
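             /* when a PHY is attached, report link up only if the PHY is
              * running and both the MAC link and the PHY link are up
              */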
2461         if (hdev->hw.mac.phydev) {
2462                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2463                         link_stat = mac_state &
2464                                 hdev->hw.mac.phydev->link;
2465                 else
2466                         link_stat = 0;
2467
2468         } else {
2469                 link_stat = mac_state;
2470         }
2471
2472         return !!link_stat;
2473 }
2474
2475 static void hclge_update_link_status(struct hclge_dev *hdev)
2476 {
2477         struct hnae3_client *rclient = hdev->roce_client;
2478         struct hnae3_client *client = hdev->nic_client;
2479         struct hnae3_handle *rhandle;
2480         struct hnae3_handle *handle;
2481         int state;
2482         int i;
2483
2484         if (!client)
2485                 return;
2486         state = hclge_get_mac_phy_link(hdev);
2487         if (state != hdev->hw.mac.link) {
2488                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2489                         handle = &hdev->vport[i].nic;
2490                         client->ops->link_status_change(handle, state);
2491                         hclge_config_mac_tnl_int(hdev, state);
2492                         rhandle = &hdev->vport[i].roce;
2493                         if (rclient && rclient->ops->link_status_change)
2494                                 rclient->ops->link_status_change(rhandle,
2495                                                                  state);
2496                 }
2497                 hdev->hw.mac.link = state;
2498         }
2499 }
2500
2501 static void hclge_update_port_capability(struct hclge_mac *mac)
2502 {
2503         /* update fec ability by speed */
2504         hclge_convert_setting_fec(mac);
2505
2506         /* firmware cannot identify the backplane type; the media type
2507          * read from the configuration helps to determine it
2508          */
2509         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2510             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2511                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2512         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2513                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2514
2515         if (mac->support_autoneg) {
2516                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2517                 linkmode_copy(mac->advertising, mac->supported);
2518         } else {
2519                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2520                                    mac->supported);
2521                 linkmode_zero(mac->advertising);
2522         }
2523 }
2524
2525 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2526 {
2527         struct hclge_sfp_info_cmd *resp = NULL;
2528         struct hclge_desc desc;
2529         int ret;
2530
2531         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2532         resp = (struct hclge_sfp_info_cmd *)desc.data;
2533         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2534         if (ret == -EOPNOTSUPP) {
2535                 dev_warn(&hdev->pdev->dev,
2536                          "IMP does not support getting SFP speed %d\n", ret);
2537                 return ret;
2538         } else if (ret) {
2539                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2540                 return ret;
2541         }
2542
2543         *speed = le32_to_cpu(resp->speed);
2544
2545         return 0;
2546 }
2547
2548 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2549 {
2550         struct hclge_sfp_info_cmd *resp;
2551         struct hclge_desc desc;
2552         int ret;
2553
2554         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2555         resp = (struct hclge_sfp_info_cmd *)desc.data;
2556
2557         resp->query_type = QUERY_ACTIVE_SPEED;
2558
2559         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2560         if (ret == -EOPNOTSUPP) {
2561                 dev_warn(&hdev->pdev->dev,
2562                          "IMP does not support getting SFP info %d\n", ret);
2563                 return ret;
2564         } else if (ret) {
2565                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2566                 return ret;
2567         }
2568
2569         mac->speed = le32_to_cpu(resp->speed);
2570         /* if resp->speed_ability is 0, it means the firmware is an old
2571          * version, so do not update these params
2572          */
2573         if (resp->speed_ability) {
2574                 mac->module_type = le32_to_cpu(resp->module_type);
2575                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2576                 mac->autoneg = resp->autoneg;
2577                 mac->support_autoneg = resp->autoneg_ability;
2578                 if (!resp->active_fec)
2579                         mac->fec_mode = 0;
2580                 else
2581                         mac->fec_mode = BIT(resp->active_fec);
2582         } else {
2583                 mac->speed_type = QUERY_SFP_SPEED;
2584         }
2585
2586         return 0;
2587 }
2588
2589 static int hclge_update_port_info(struct hclge_dev *hdev)
2590 {
2591         struct hclge_mac *mac = &hdev->hw.mac;
2592         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2593         int ret;
2594
2595         /* get the port info from SFP cmd if not copper port */
2596         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2597                 return 0;
2598
2599         /* if IMP does not support getting SFP/qSFP info, return directly */
2600         if (!hdev->support_sfp_query)
2601                 return 0;
2602
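             /* HW of revision 0x21 or later queries the full SFP info (speed,
              * autoneg, module type, FEC); older revisions only query the
              * SFP speed
              */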
2603         if (hdev->pdev->revision >= 0x21)
2604                 ret = hclge_get_sfp_info(hdev, mac);
2605         else
2606                 ret = hclge_get_sfp_speed(hdev, &speed);
2607
2608         if (ret == -EOPNOTSUPP) {
2609                 hdev->support_sfp_query = false;
2610                 return ret;
2611         } else if (ret) {
2612                 return ret;
2613         }
2614
2615         if (hdev->pdev->revision >= 0x21) {
2616                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2617                         hclge_update_port_capability(mac);
2618                         return 0;
2619                 }
2620                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2621                                                HCLGE_MAC_FULL);
2622         } else {
2623                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2624                         return 0; /* do nothing if no SFP */
2625
2626                 /* must config full duplex for SFP */
2627                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2628         }
2629 }
2630
2631 static int hclge_get_status(struct hnae3_handle *handle)
2632 {
2633         struct hclge_vport *vport = hclge_get_vport(handle);
2634         struct hclge_dev *hdev = vport->back;
2635
2636         hclge_update_link_status(hdev);
2637
2638         return hdev->hw.mac.link;
2639 }
2640
2641 static void hclge_service_timer(struct timer_list *t)
2642 {
2643         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2644
2645         mod_timer(&hdev->service_timer, jiffies + HZ);
2646         hdev->hw_stats.stats_timer++;
2647         hdev->fd_arfs_expire_timer++;
2648         hclge_task_schedule(hdev);
2649 }
2650
2651 static void hclge_service_complete(struct hclge_dev *hdev)
2652 {
2653         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2654
2655         /* Flush memory before next watchdog */
2656         smp_mb__before_atomic();
2657         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2658 }
2659
2660 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2661 {
2662         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2663
2664         /* fetch the events from their corresponding regs */
2665         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2666         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2667         msix_src_reg = hclge_read_dev(&hdev->hw,
2668                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2669
2670         /* Assumption: if reset and mailbox events happen to be reported
2671          * together, we will only process the reset event in this pass and
2672          * defer the processing of the mailbox events. Since the RX CMDQ
2673          * event is not cleared this time, the H/W will raise another
2674          * interrupt just for the mailbox.
2675          */
2676
2677         /* check for vector0 reset event sources */
2678         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2679                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2680                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2681                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2682                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2683                 hdev->rst_stats.imp_rst_cnt++;
2684                 return HCLGE_VECTOR0_EVENT_RST;
2685         }
2686
2687         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2688                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2689                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2691                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2692                 hdev->rst_stats.global_rst_cnt++;
2693                 return HCLGE_VECTOR0_EVENT_RST;
2694         }
2695
2696         /* check for vector0 msix event source */
2697         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2698                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2699                         msix_src_reg);
2700                 return HCLGE_VECTOR0_EVENT_ERR;
2701         }
2702
2703         /* check for vector0 mailbox(=CMDQ RX) event source */
2704         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2705                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2706                 *clearval = cmdq_src_reg;
2707                 return HCLGE_VECTOR0_EVENT_MBX;
2708         }
2709
2710         /* print other vector0 event source */
2711         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2712                 cmdq_src_reg, msix_src_reg);
2713         return HCLGE_VECTOR0_EVENT_OTHER;
2714 }
2715
2716 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2717                                     u32 regclr)
2718 {
2719         switch (event_type) {
2720         case HCLGE_VECTOR0_EVENT_RST:
2721                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2722                 break;
2723         case HCLGE_VECTOR0_EVENT_MBX:
2724                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2725                 break;
2726         default:
2727                 break;
2728         }
2729 }
2730
2731 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2732 {
2733         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2734                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2735                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2736                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2737         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2738 }
2739
2740 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2741 {
2742         writel(enable ? 1 : 0, vector->addr);
2743 }
2744
2745 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2746 {
2747         struct hclge_dev *hdev = data;
2748         u32 event_cause;
2749         u32 clearval;
2750
2751         hclge_enable_vector(&hdev->misc_vector, false);
2752         event_cause = hclge_check_event_cause(hdev, &clearval);
2753
2754         /* vector 0 interrupt is shared with reset and mailbox source events. */
2755         switch (event_cause) {
2756         case HCLGE_VECTOR0_EVENT_ERR:
2757                 /* we do not know what type of reset is required now. This could
2758                  * only be decided after we fetch the type of errors which
2759                  * caused this event. Therefore, we will do below for now:
2760                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2761                  *    have deferred the type of reset to be used.
2762                  * 2. Schedule the reset service task.
2763                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2764                  *    will fetch the correct type of reset.  This would be done
2765                  *    by first decoding the types of errors.
2766                  */
2767                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2768                 /* fall through */
2769         case HCLGE_VECTOR0_EVENT_RST:
2770                 hclge_reset_task_schedule(hdev);
2771                 break;
2772         case HCLGE_VECTOR0_EVENT_MBX:
2773                 /* If we are here then,
2774                  * 1. Either we are not handling any mbx task and none is
2775                  *    scheduled either,
2776                  *                        OR
2777                  * 2. We could be handling an mbx task but nothing more is
2778                  *    scheduled.
2779                  * In both cases, we should schedule the mbx task as there are
2780                  * more mbx messages reported by this interrupt.
2781                  */
2782                 hclge_mbx_task_schedule(hdev);
2783                 break;
2784         default:
2785                 dev_warn(&hdev->pdev->dev,
2786                          "received unknown or unhandled event of vector0\n");
2787                 break;
2788         }
2789
2790         /* clear the source of interrupt if it is not caused by reset */
2791         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2792                 hclge_clear_event_cause(hdev, event_cause, clearval);
2793                 hclge_enable_vector(&hdev->misc_vector, true);
2794         }
2795
2796         return IRQ_HANDLED;
2797 }
2798
2799 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2800 {
2801         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2802                 dev_warn(&hdev->pdev->dev,
2803                          "vector(vector_id %d) has been freed.\n", vector_id);
2804                 return;
2805         }
2806
2807         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2808         hdev->num_msi_left += 1;
2809         hdev->num_msi_used -= 1;
2810 }
2811
2812 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2813 {
2814         struct hclge_misc_vector *vector = &hdev->misc_vector;
2815
2816         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2817
2818         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2819         hdev->vector_status[0] = 0;
2820
2821         hdev->num_msi_left -= 1;
2822         hdev->num_msi_used += 1;
2823 }
2824
2825 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2826 {
2827         int ret;
2828
2829         hclge_get_misc_vector(hdev);
2830
2831         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
2832         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2833                           0, "hclge_misc", hdev);
2834         if (ret) {
2835                 hclge_free_vector(hdev, 0);
2836                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2837                         hdev->misc_vector.vector_irq);
2838         }
2839
2840         return ret;
2841 }
2842
2843 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2844 {
2845         free_irq(hdev->misc_vector.vector_irq, hdev);
2846         hclge_free_vector(hdev, 0);
2847 }
2848
2849 int hclge_notify_client(struct hclge_dev *hdev,
2850                         enum hnae3_reset_notify_type type)
2851 {
2852         struct hnae3_client *client = hdev->nic_client;
2853         u16 i;
2854
2855         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2856             !client)
2857                 return 0;
2858
2859         if (!client->ops->reset_notify)
2860                 return -EOPNOTSUPP;
2861
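        /* notify the nic handle of every vport: the PF itself plus the VMDq
         * vports
         */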
2862         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2863                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2864                 int ret;
2865
2866                 ret = client->ops->reset_notify(handle, type);
2867                 if (ret) {
2868                         dev_err(&hdev->pdev->dev,
2869                                 "notify nic client failed %d(%d)\n", type, ret);
2870                         return ret;
2871                 }
2872         }
2873
2874         return 0;
2875 }
2876
2877 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2878                                     enum hnae3_reset_notify_type type)
2879 {
2880         struct hnae3_client *client = hdev->roce_client;
2881         int ret = 0;
2882         u16 i;
2883
2884         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2885             !client)
2886                 return 0;
2887
2888         if (!client->ops->reset_notify)
2889                 return -EOPNOTSUPP;
2890
2891         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2892                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2893
2894                 ret = client->ops->reset_notify(handle, type);
2895                 if (ret) {
2896                         dev_err(&hdev->pdev->dev,
2897                                 "notify roce client failed %d(%d)",
2898                                 type, ret);
2899                         return ret;
2900                 }
2901         }
2902
2903         return ret;
2904 }
2905
2906 static int hclge_reset_wait(struct hclge_dev *hdev)
2907 {
2908 #define HCLGE_RESET_WATI_MS     100
2909 #define HCLGE_RESET_WAIT_CNT    200
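/* together these bound the polling below to roughly 200 * 100 ms = 20 s */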
2910         u32 val, reg, reg_bit;
2911         u32 cnt = 0;
2912
2913         switch (hdev->reset_type) {
2914         case HNAE3_IMP_RESET:
2915                 reg = HCLGE_GLOBAL_RESET_REG;
2916                 reg_bit = HCLGE_IMP_RESET_BIT;
2917                 break;
2918         case HNAE3_GLOBAL_RESET:
2919                 reg = HCLGE_GLOBAL_RESET_REG;
2920                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2921                 break;
2922         case HNAE3_FUNC_RESET:
2923                 reg = HCLGE_FUN_RST_ING;
2924                 reg_bit = HCLGE_FUN_RST_ING_B;
2925                 break;
2926         case HNAE3_FLR_RESET:
2927                 break;
2928         default:
2929                 dev_err(&hdev->pdev->dev,
2930                         "Wait for unsupported reset type: %d\n",
2931                         hdev->reset_type);
2932                 return -EINVAL;
2933         }
2934
2935         if (hdev->reset_type == HNAE3_FLR_RESET) {
2936                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2937                        cnt++ < HCLGE_RESET_WAIT_CNT)
2938                         msleep(HCLGE_RESET_WATI_MS);
2939
2940                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2941                         dev_err(&hdev->pdev->dev,
2942                                 "flr wait timeout: %d\n", cnt);
2943                         return -EBUSY;
2944                 }
2945
2946                 return 0;
2947         }
2948
2949         val = hclge_read_dev(&hdev->hw, reg);
2950         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2951                 msleep(HCLGE_RESET_WATI_MS);
2952                 val = hclge_read_dev(&hdev->hw, reg);
2953                 cnt++;
2954         }
2955
2956         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2957                 dev_warn(&hdev->pdev->dev,
2958                          "Wait for reset timeout: %d\n", hdev->reset_type);
2959                 return -EBUSY;
2960         }
2961
2962         return 0;
2963 }
2964
2965 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2966 {
2967         struct hclge_vf_rst_cmd *req;
2968         struct hclge_desc desc;
2969
2970         req = (struct hclge_vf_rst_cmd *)desc.data;
2971         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2972         req->dest_vfid = func_id;
2973
2974         if (reset)
2975                 req->vf_rst = 0x1;
2976
2977         return hclge_cmd_send(&hdev->hw, &desc, 1);
2978 }
2979
2980 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2981 {
2982         int i;
2983
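        /* VF vports are laid out after the PF and VMDq vports in hdev->vport,
         * so start the walk at num_vmdq_vport + 1
         */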
2984         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2985                 struct hclge_vport *vport = &hdev->vport[i];
2986                 int ret;
2987
2988                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2989                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2990                 if (ret) {
2991                         dev_err(&hdev->pdev->dev,
2992                                 "set vf(%d) rst failed %d!\n",
2993                                 vport->vport_id, ret);
2994                         return ret;
2995                 }
2996
2997                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2998                         continue;
2999
3000                 /* Inform VF to process the reset.
3001                  * hclge_inform_reset_assert_to_vf may fail if VF
3002                  * driver is not loaded.
3003                  */
3004                 ret = hclge_inform_reset_assert_to_vf(vport);
3005                 if (ret)
3006                         dev_warn(&hdev->pdev->dev,
3007                                  "inform reset to vf(%d) failed %d!\n",
3008                                  vport->vport_id, ret);
3009         }
3010
3011         return 0;
3012 }
3013
3014 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3015 {
3016         struct hclge_desc desc;
3017         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3018         int ret;
3019
3020         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3021         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3022         req->fun_reset_vfid = func_id;
3023
3024         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3025         if (ret)
3026                 dev_err(&hdev->pdev->dev,
3027                         "send function reset cmd fail, status =%d\n", ret);
3028
3029         return ret;
3030 }
3031
3032 static void hclge_do_reset(struct hclge_dev *hdev)
3033 {
3034         struct hnae3_handle *handle = &hdev->vport[0].nic;
3035         struct pci_dev *pdev = hdev->pdev;
3036         u32 val;
3037
3038         if (hclge_get_hw_reset_stat(handle)) {
3039                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3040                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3041                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3042                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3043                 return;
3044         }
3045
3046         switch (hdev->reset_type) {
3047         case HNAE3_GLOBAL_RESET:
3048                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3049                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3050                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3051                 dev_info(&pdev->dev, "Global Reset requested\n");
3052                 break;
3053         case HNAE3_FUNC_RESET:
3054                 dev_info(&pdev->dev, "PF Reset requested\n");
3055                 /* schedule again to check later */
3056                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3057                 hclge_reset_task_schedule(hdev);
3058                 break;
3059         case HNAE3_FLR_RESET:
3060                 dev_info(&pdev->dev, "FLR requested\n");
3061                 /* schedule again to check later */
3062                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3063                 hclge_reset_task_schedule(hdev);
3064                 break;
3065         default:
3066                 dev_warn(&pdev->dev,
3067                          "Unsupported reset type: %d\n", hdev->reset_type);
3068                 break;
3069         }
3070 }
3071
3072 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3073                                                    unsigned long *addr)
3074 {
3075         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3076
3077         /* first, resolve any unknown reset type to the known type(s) */
3078         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3079                 /* we will intentionally ignore any errors from this function
3080                  * as we will end up in *some* reset request in any case
3081                  */
3082                 hclge_handle_hw_msix_error(hdev, addr);
3083                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3084                 /* We deferred the clearing of the error event which caused
3085                  * the interrupt since it was not possible to do that in
3086                  * interrupt context (and this is the reason we introduced the
3087                  * new UNKNOWN reset type). Now that the errors have been
3088                  * handled and cleared in hardware, we can safely enable
3089                  * interrupts. This is an exception to the norm.
3090                  */
3091                 hclge_enable_vector(&hdev->misc_vector, true);
3092         }
3093
3094         /* return the highest priority reset level amongst all */
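        /* priority order is IMP > GLOBAL > FUNC > FLR; when a higher level is
         * chosen, the GLOBAL/FUNC requests it supersedes are cleared as well
         */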
3095         if (test_bit(HNAE3_IMP_RESET, addr)) {
3096                 rst_level = HNAE3_IMP_RESET;
3097                 clear_bit(HNAE3_IMP_RESET, addr);
3098                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3099                 clear_bit(HNAE3_FUNC_RESET, addr);
3100         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3101                 rst_level = HNAE3_GLOBAL_RESET;
3102                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3103                 clear_bit(HNAE3_FUNC_RESET, addr);
3104         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3105                 rst_level = HNAE3_FUNC_RESET;
3106                 clear_bit(HNAE3_FUNC_RESET, addr);
3107         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3108                 rst_level = HNAE3_FLR_RESET;
3109                 clear_bit(HNAE3_FLR_RESET, addr);
3110         }
3111
3112         if (hdev->reset_type != HNAE3_NONE_RESET &&
3113             rst_level < hdev->reset_type)
3114                 return HNAE3_NONE_RESET;
3115
3116         return rst_level;
3117 }
3118
3119 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3120 {
3121         u32 clearval = 0;
3122
3123         switch (hdev->reset_type) {
3124         case HNAE3_IMP_RESET:
3125                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3126                 break;
3127         case HNAE3_GLOBAL_RESET:
3128                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3129                 break;
3130         default:
3131                 break;
3132         }
3133
3134         if (!clearval)
3135                 return;
3136
3137         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3138         hclge_enable_vector(&hdev->misc_vector, true);
3139 }
3140
3141 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3142 {
3143         int ret = 0;
3144
3145         switch (hdev->reset_type) {
3146         case HNAE3_FUNC_RESET:
3147                 /* fall through */
3148         case HNAE3_FLR_RESET:
3149                 ret = hclge_set_all_vf_rst(hdev, true);
3150                 break;
3151         default:
3152                 break;
3153         }
3154
3155         return ret;
3156 }
3157
3158 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3159 {
3160 #define HCLGE_RESET_SYNC_TIME 100
3161
3162         u32 reg_val;
3163         int ret = 0;
3164
3165         switch (hdev->reset_type) {
3166         case HNAE3_FUNC_RESET:
3167                 /* There is no mechanism for the PF to know if the VF has stopped
3168                  * IO; for now, just wait 100 ms for the VF to stop IO
3169                  */
3170                 msleep(HCLGE_RESET_SYNC_TIME);
3171                 ret = hclge_func_reset_cmd(hdev, 0);
3172                 if (ret) {
3173                         dev_err(&hdev->pdev->dev,
3174                                 "asserting function reset fail %d!\n", ret);
3175                         return ret;
3176                 }
3177
3178                 /* After performing PF reset, it is not necessary to do the
3179                  * mailbox handling or send any command to firmware, because
3180                  * any mailbox handling or command to firmware is only valid
3181                  * after hclge_cmd_init is called.
3182                  */
3183                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3184                 hdev->rst_stats.pf_rst_cnt++;
3185                 break;
3186         case HNAE3_FLR_RESET:
3187                 /* There is no mechanism for the PF to know if the VF has stopped
3188                  * IO; for now, just wait 100 ms for the VF to stop IO
3189                  */
3190                 msleep(HCLGE_RESET_SYNC_TIME);
3191                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3192                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3193                 hdev->rst_stats.flr_rst_cnt++;
3194                 break;
3195         case HNAE3_IMP_RESET:
3196                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3197                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3198                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3199                 break;
3200         default:
3201                 break;
3202         }
3203
3204         /* inform hardware that preparatory work is done */
3205         msleep(HCLGE_RESET_SYNC_TIME);
3206         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3207                         HCLGE_NIC_CMQ_ENABLE);
3208         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3209
3210         return ret;
3211 }
3212
3213 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3214 {
3215 #define MAX_RESET_FAIL_CNT 5
3216 #define RESET_UPGRADE_DELAY_SEC 10
3217
3218         if (hdev->reset_pending) {
3219                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3220                          hdev->reset_pending);
3221                 return true;
3222         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3223                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3224                     BIT(HCLGE_IMP_RESET_BIT))) {
3225                 dev_info(&hdev->pdev->dev,
3226                          "reset failed because IMP Reset is pending\n");
3227                 hclge_clear_reset_cause(hdev);
3228                 return false;
3229         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3230                 hdev->reset_fail_cnt++;
3231                 if (is_timeout) {
3232                         set_bit(hdev->reset_type, &hdev->reset_pending);
3233                         dev_info(&hdev->pdev->dev,
3234                                  "re-schedule to wait for hw reset done\n");
3235                         return true;
3236                 }
3237
3238                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3239                 hclge_clear_reset_cause(hdev);
3240                 mod_timer(&hdev->reset_timer,
3241                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3242
3243                 return false;
3244         }
3245
3246         hclge_clear_reset_cause(hdev);
3247         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3248         return false;
3249 }
3250
3251 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3252 {
3253         int ret = 0;
3254
3255         switch (hdev->reset_type) {
3256         case HNAE3_FUNC_RESET:
3257                 /* fall through */
3258         case HNAE3_FLR_RESET:
3259                 ret = hclge_set_all_vf_rst(hdev, false);
3260                 break;
3261         default:
3262                 break;
3263         }
3264
3265         return ret;
3266 }
3267
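/* rough flow of a full reset, as implemented below: quiesce the roce client,
 * prepare the VFs and quiesce the nic client, assert the reset and wait for
 * the hardware to finish, re-init the ae device and clients, then bring the
 * nic and roce clients back up
 */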
3268 static void hclge_reset(struct hclge_dev *hdev)
3269 {
3270         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3271         bool is_timeout = false;
3272         int ret;
3273
3274         /* Initialize ae_dev reset status as well, in case the enet layer wants
3275          * to know if the device is undergoing reset
3276          */
3277         ae_dev->reset_type = hdev->reset_type;
3278         hdev->rst_stats.reset_cnt++;
3279         /* perform reset of the stack & ae device for a client */
3280         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3281         if (ret)
3282                 goto err_reset;
3283
3284         ret = hclge_reset_prepare_down(hdev);
3285         if (ret)
3286                 goto err_reset;
3287
3288         rtnl_lock();
3289         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3290         if (ret)
3291                 goto err_reset_lock;
3292
3293         rtnl_unlock();
3294
3295         ret = hclge_reset_prepare_wait(hdev);
3296         if (ret)
3297                 goto err_reset;
3298
3299         if (hclge_reset_wait(hdev)) {
3300                 is_timeout = true;
3301                 goto err_reset;
3302         }
3303
3304         hdev->rst_stats.hw_reset_done_cnt++;
3305
3306         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3307         if (ret)
3308                 goto err_reset;
3309
3310         rtnl_lock();
3311         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3312         if (ret)
3313                 goto err_reset_lock;
3314
3315         ret = hclge_reset_ae_dev(hdev->ae_dev);
3316         if (ret)
3317                 goto err_reset_lock;
3318
3319         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3320         if (ret)
3321                 goto err_reset_lock;
3322
3323         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3324         if (ret)
3325                 goto err_reset_lock;
3326
3327         hclge_clear_reset_cause(hdev);
3328
3329         ret = hclge_reset_prepare_up(hdev);
3330         if (ret)
3331                 goto err_reset_lock;
3332
3333         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3334         if (ret)
3335                 goto err_reset_lock;
3336
3337         rtnl_unlock();
3338
3339         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3340         if (ret)
3341                 goto err_reset;
3342
3343         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3344         if (ret)
3345                 goto err_reset;
3346
3347         hdev->last_reset_time = jiffies;
3348         hdev->reset_fail_cnt = 0;
3349         hdev->rst_stats.reset_done_cnt++;
3350         ae_dev->reset_type = HNAE3_NONE_RESET;
3351         del_timer(&hdev->reset_timer);
3352
3353         return;
3354
3355 err_reset_lock:
3356         rtnl_unlock();
3357 err_reset:
3358         if (hclge_reset_err_handle(hdev, is_timeout))
3359                 hclge_reset_task_schedule(hdev);
3360 }
3361
3362 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3363 {
3364         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3365         struct hclge_dev *hdev = ae_dev->priv;
3366
3367         /* We might end up getting called broadly because of the 2 cases below:
3368          * 1. A recoverable error was conveyed through APEI and the only way
3369          *    to bring back normalcy is to reset.
3370          * 2. A new reset request from the stack due to timeout
3371          *
3372          * For the first case, the error event might not have an ae handle
3373          * available. Check if this is a new reset request and we are not here
3374          * just because the last reset attempt did not succeed and the watchdog
3375          * hit us again. We will know this if the last reset request did not
3376          * occur very recently (watchdog timer = 5*HZ, so check after a
3377          * sufficiently large time, say 4*5*HZ). For a new request we reset the
3378          * "reset level" to PF reset. And if it is a repeat of the most recent
3379          * reset request then we want to make sure we throttle it, so we will
3380          * not allow it again before 3*HZ has elapsed.
3381          */
3382         if (!handle)
3383                 handle = &hdev->vport[0].nic;
3384
3385         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3386                 return;
3387         else if (hdev->default_reset_request)
3388                 hdev->reset_level =
3389                         hclge_get_reset_level(hdev,
3390                                               &hdev->default_reset_request);
3391         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3392                 hdev->reset_level = HNAE3_FUNC_RESET;
3393
3394         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3395                  hdev->reset_level);
3396
3397         /* request reset & schedule reset task */
3398         set_bit(hdev->reset_level, &hdev->reset_request);
3399         hclge_reset_task_schedule(hdev);
3400
3401         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3402                 hdev->reset_level++;
3403 }
3404
3405 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3406                                         enum hnae3_reset_type rst_type)
3407 {
3408         struct hclge_dev *hdev = ae_dev->priv;
3409
3410         set_bit(rst_type, &hdev->default_reset_request);
3411 }
3412
3413 static void hclge_reset_timer(struct timer_list *t)
3414 {
3415         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3416
3417         dev_info(&hdev->pdev->dev,
3418                  "triggering global reset in reset timer\n");
3419         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3420         hclge_reset_event(hdev->pdev, NULL);
3421 }
3422
3423 static void hclge_reset_subtask(struct hclge_dev *hdev)
3424 {
3425         /* check if there is any ongoing reset in the hardware. This status can
3426          * be checked from reset_pending. If there is, then we need to wait for
3427          * the hardware to complete the reset.
3428          *    a. If we are able to figure out in reasonable time that the hardware
3429          *       has fully completed the reset, then we can proceed with driver and client
3430          *       reset.
3431          *    b. else, we can come back later to check this status so re-sched
3432          *       now.
3433          */
3434         hdev->last_reset_time = jiffies;
3435         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3436         if (hdev->reset_type != HNAE3_NONE_RESET)
3437                 hclge_reset(hdev);
3438
3439         /* check if we got any *new* reset requests to be honored */
3440         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3441         if (hdev->reset_type != HNAE3_NONE_RESET)
3442                 hclge_do_reset(hdev);
3443
3444         hdev->reset_type = HNAE3_NONE_RESET;
3445 }
3446
3447 static void hclge_reset_service_task(struct work_struct *work)
3448 {
3449         struct hclge_dev *hdev =
3450                 container_of(work, struct hclge_dev, rst_service_task);
3451
3452         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3453                 return;
3454
3455         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3456
3457         hclge_reset_subtask(hdev);
3458
3459         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3460 }
3461
3462 static void hclge_mailbox_service_task(struct work_struct *work)
3463 {
3464         struct hclge_dev *hdev =
3465                 container_of(work, struct hclge_dev, mbx_service_task);
3466
3467         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3468                 return;
3469
3470         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3471
3472         hclge_mbx_handler(hdev);
3473
3474         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3475 }
3476
3477 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3478 {
3479         int i;
3480
3481         /* start from vport 1 since the PF (vport 0) is always alive */
3482         for (i = 1; i < hdev->num_alloc_vport; i++) {
3483                 struct hclge_vport *vport = &hdev->vport[i];
3484
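                /* a VF that has not refreshed last_active_jiffies within 8
                 * seconds is no longer considered alive
                 */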
3485                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3486                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3487
3488                 /* If vf is not alive, set to default value */
3489                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3490                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3491         }
3492 }
3493
3494 static void hclge_service_task(struct work_struct *work)
3495 {
3496         struct hclge_dev *hdev =
3497                 container_of(work, struct hclge_dev, service_task);
3498
3499         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3500                 hclge_update_stats_for_all(hdev);
3501                 hdev->hw_stats.stats_timer = 0;
3502         }
3503
3504         hclge_update_port_info(hdev);
3505         hclge_update_link_status(hdev);
3506         hclge_update_vport_alive(hdev);
3507         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3508                 hclge_rfs_filter_expire(hdev);
3509                 hdev->fd_arfs_expire_timer = 0;
3510         }
3511         hclge_service_complete(hdev);
3512 }
3513
3514 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3515 {
3516         /* VF handle has no client */
3517         if (!handle->client)
3518                 return container_of(handle, struct hclge_vport, nic);
3519         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3520                 return container_of(handle, struct hclge_vport, roce);
3521         else
3522                 return container_of(handle, struct hclge_vport, nic);
3523 }
3524
3525 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3526                             struct hnae3_vector_info *vector_info)
3527 {
3528         struct hclge_vport *vport = hclge_get_vport(handle);
3529         struct hnae3_vector_info *vector = vector_info;
3530         struct hclge_dev *hdev = vport->back;
3531         int alloc = 0;
3532         int i, j;
3533
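        /* vector 0 is reserved for the misc interrupt, so the search below
         * starts at index 1
         */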
3534         vector_num = min(hdev->num_msi_left, vector_num);
3535
3536         for (j = 0; j < vector_num; j++) {
3537                 for (i = 1; i < hdev->num_msi; i++) {
3538                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3539                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3540                                 vector->io_addr = hdev->hw.io_base +
3541                                         HCLGE_VECTOR_REG_BASE +
3542                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3543                                         vport->vport_id *
3544                                         HCLGE_VECTOR_VF_OFFSET;
3545                                 hdev->vector_status[i] = vport->vport_id;
3546                                 hdev->vector_irq[i] = vector->vector;
3547
3548                                 vector++;
3549                                 alloc++;
3550
3551                                 break;
3552                         }
3553                 }
3554         }
3555         hdev->num_msi_left -= alloc;
3556         hdev->num_msi_used += alloc;
3557
3558         return alloc;
3559 }
3560
3561 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3562 {
3563         int i;
3564
3565         for (i = 0; i < hdev->num_msi; i++)
3566                 if (vector == hdev->vector_irq[i])
3567                         return i;
3568
3569         return -EINVAL;
3570 }
3571
3572 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3573 {
3574         struct hclge_vport *vport = hclge_get_vport(handle);
3575         struct hclge_dev *hdev = vport->back;
3576         int vector_id;
3577
3578         vector_id = hclge_get_vector_index(hdev, vector);
3579         if (vector_id < 0) {
3580                 dev_err(&hdev->pdev->dev,
3581                         "Get vector index fail. vector_id =%d\n", vector_id);
3582                 return vector_id;
3583         }
3584
3585         hclge_free_vector(hdev, vector_id);
3586
3587         return 0;
3588 }
3589
3590 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3591 {
3592         return HCLGE_RSS_KEY_SIZE;
3593 }
3594
3595 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3596 {
3597         return HCLGE_RSS_IND_TBL_SIZE;
3598 }
3599
3600 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3601                                   const u8 hfunc, const u8 *key)
3602 {
3603         struct hclge_rss_config_cmd *req;
3604         struct hclge_desc desc;
3605         int key_offset;
3606         int key_size;
3607         int ret;
3608
3609         req = (struct hclge_rss_config_cmd *)desc.data;
3610
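        /* the hash key is programmed in three chunks of HCLGE_RSS_HASH_KEY_NUM
         * bytes; the last chunk carries whatever remains of HCLGE_RSS_KEY_SIZE
         */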
3611         for (key_offset = 0; key_offset < 3; key_offset++) {
3612                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3613                                            false);
3614
3615                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3616                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3617
3618                 if (key_offset == 2)
3619                         key_size =
3620                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3621                 else
3622                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3623
3624                 memcpy(req->hash_key,
3625                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3626
3627                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3628                 if (ret) {
3629                         dev_err(&hdev->pdev->dev,
3630                                 "Configure RSS config fail, status = %d\n",
3631                                 ret);
3632                         return ret;
3633                 }
3634         }
3635         return 0;
3636 }
3637
3638 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3639 {
3640         struct hclge_rss_indirection_table_cmd *req;
3641         struct hclge_desc desc;
3642         int i, j;
3643         int ret;
3644
3645         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3646
3647         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3648                 hclge_cmd_setup_basic_desc
3649                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3650
3651                 req->start_table_index =
3652                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3653                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3654
3655                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3656                         req->rss_result[j] =
3657                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3658
3659                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3660                 if (ret) {
3661                         dev_err(&hdev->pdev->dev,
3662                                 "Configure rss indir table fail, status = %d\n",
3663                                 ret);
3664                         return ret;
3665                 }
3666         }
3667         return 0;
3668 }
3669
3670 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3671                                  u16 *tc_size, u16 *tc_offset)
3672 {
3673         struct hclge_rss_tc_mode_cmd *req;
3674         struct hclge_desc desc;
3675         int ret;
3676         int i;
3677
3678         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3679         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3680
3681         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3682                 u16 mode = 0;
3683
3684                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3685                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3686                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3687                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3688                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3689
3690                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3691         }
3692
3693         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3694         if (ret)
3695                 dev_err(&hdev->pdev->dev,
3696                         "Configure rss tc mode fail, status = %d\n", ret);
3697
3698         return ret;
3699 }
3700
3701 static void hclge_get_rss_type(struct hclge_vport *vport)
3702 {
3703         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3704             vport->rss_tuple_sets.ipv4_udp_en ||
3705             vport->rss_tuple_sets.ipv4_sctp_en ||
3706             vport->rss_tuple_sets.ipv6_tcp_en ||
3707             vport->rss_tuple_sets.ipv6_udp_en ||
3708             vport->rss_tuple_sets.ipv6_sctp_en)
3709                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3710         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3711                  vport->rss_tuple_sets.ipv6_fragment_en)
3712                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3713         else
3714                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3715 }
3716
3717 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3718 {
3719         struct hclge_rss_input_tuple_cmd *req;
3720         struct hclge_desc desc;
3721         int ret;
3722
3723         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3724
3725         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3726
3727         /* Get the tuple cfg from pf */
3728         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3729         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3730         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3731         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3732         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3733         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3734         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3735         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3736         hclge_get_rss_type(&hdev->vport[0]);
3737         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3738         if (ret)
3739                 dev_err(&hdev->pdev->dev,
3740                         "Configure rss input fail, status = %d\n", ret);
3741         return ret;
3742 }
3743
3744 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3745                          u8 *key, u8 *hfunc)
3746 {
3747         struct hclge_vport *vport = hclge_get_vport(handle);
3748         int i;
3749
3750         /* Get hash algorithm */
3751         if (hfunc) {
3752                 switch (vport->rss_algo) {
3753                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3754                         *hfunc = ETH_RSS_HASH_TOP;
3755                         break;
3756                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3757                         *hfunc = ETH_RSS_HASH_XOR;
3758                         break;
3759                 default:
3760                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3761                         break;
3762                 }
3763         }
3764
3765         /* Get the RSS Key required by the user */
3766         if (key)
3767                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3768
3769         /* Get indirect table */
3770         if (indir)
3771                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3772                         indir[i] = vport->rss_indirection_tbl[i];
3773
3774         return 0;
3775 }
3776
3777 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3778                          const  u8 *key, const  u8 hfunc)
3779 {
3780         struct hclge_vport *vport = hclge_get_vport(handle);
3781         struct hclge_dev *hdev = vport->back;
3782         u8 hash_algo;
3783         int ret, i;
3784
3785         /* Set the RSS Hash Key if specified by the user */
3786         if (key) {
3787                 switch (hfunc) {
3788                 case ETH_RSS_HASH_TOP:
3789                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3790                         break;
3791                 case ETH_RSS_HASH_XOR:
3792                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3793                         break;
3794                 case ETH_RSS_HASH_NO_CHANGE:
3795                         hash_algo = vport->rss_algo;
3796                         break;
3797                 default:
3798                         return -EINVAL;
3799                 }
3800
3801                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3802                 if (ret)
3803                         return ret;
3804
3805                 /* Update the shadow RSS key with the user specified key */
3806                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3807                 vport->rss_algo = hash_algo;
3808         }
3809
3810         /* Update the shadow RSS table with user specified qids */
3811         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3812                 vport->rss_indirection_tbl[i] = indir[i];
3813
3814         /* Update the hardware */
3815         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3816 }
3817
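/* translate the ethtool RXH_* flags in nfc->data into the hardware tuple bits:
 * source/destination IP, source/destination L4 port, and the verification tag
 * for SCTP flows
 */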
3818 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3819 {
3820         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3821
3822         if (nfc->data & RXH_L4_B_2_3)
3823                 hash_sets |= HCLGE_D_PORT_BIT;
3824         else
3825                 hash_sets &= ~HCLGE_D_PORT_BIT;
3826
3827         if (nfc->data & RXH_IP_SRC)
3828                 hash_sets |= HCLGE_S_IP_BIT;
3829         else
3830                 hash_sets &= ~HCLGE_S_IP_BIT;
3831
3832         if (nfc->data & RXH_IP_DST)
3833                 hash_sets |= HCLGE_D_IP_BIT;
3834         else
3835                 hash_sets &= ~HCLGE_D_IP_BIT;
3836
3837         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3838                 hash_sets |= HCLGE_V_TAG_BIT;
3839
3840         return hash_sets;
3841 }
3842
3843 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3844                                struct ethtool_rxnfc *nfc)
3845 {
3846         struct hclge_vport *vport = hclge_get_vport(handle);
3847         struct hclge_dev *hdev = vport->back;
3848         struct hclge_rss_input_tuple_cmd *req;
3849         struct hclge_desc desc;
3850         u8 tuple_sets;
3851         int ret;
3852
3853         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3854                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3855                 return -EINVAL;
3856
3857         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3858         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3859
3860         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3861         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3862         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3863         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3864         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3865         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3866         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3867         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3868
3869         tuple_sets = hclge_get_rss_hash_bits(nfc);
3870         switch (nfc->flow_type) {
3871         case TCP_V4_FLOW:
3872                 req->ipv4_tcp_en = tuple_sets;
3873                 break;
3874         case TCP_V6_FLOW:
3875                 req->ipv6_tcp_en = tuple_sets;
3876                 break;
3877         case UDP_V4_FLOW:
3878                 req->ipv4_udp_en = tuple_sets;
3879                 break;
3880         case UDP_V6_FLOW:
3881                 req->ipv6_udp_en = tuple_sets;
3882                 break;
3883         case SCTP_V4_FLOW:
3884                 req->ipv4_sctp_en = tuple_sets;
3885                 break;
3886         case SCTP_V6_FLOW:
3887                 if ((nfc->data & RXH_L4_B_0_1) ||
3888                     (nfc->data & RXH_L4_B_2_3))
3889                         return -EINVAL;
3890
3891                 req->ipv6_sctp_en = tuple_sets;
3892                 break;
3893         case IPV4_FLOW:
3894                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3895                 break;
3896         case IPV6_FLOW:
3897                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3898                 break;
3899         default:
3900                 return -EINVAL;
3901         }
3902
3903         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3904         if (ret) {
3905                 dev_err(&hdev->pdev->dev,
3906                         "Set rss tuple fail, status = %d\n", ret);
3907                 return ret;
3908         }
3909
3910         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3911         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3912         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3913         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3914         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3915         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3916         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3917         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3918         hclge_get_rss_type(vport);
3919         return 0;
3920 }
3921
3922 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3923                                struct ethtool_rxnfc *nfc)
3924 {
3925         struct hclge_vport *vport = hclge_get_vport(handle);
3926         u8 tuple_sets;
3927
3928         nfc->data = 0;
3929
3930         switch (nfc->flow_type) {
3931         case TCP_V4_FLOW:
3932                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3933                 break;
3934         case UDP_V4_FLOW:
3935                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3936                 break;
3937         case TCP_V6_FLOW:
3938                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3939                 break;
3940         case UDP_V6_FLOW:
3941                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3942                 break;
3943         case SCTP_V4_FLOW:
3944                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3945                 break;
3946         case SCTP_V6_FLOW:
3947                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3948                 break;
3949         case IPV4_FLOW:
3950         case IPV6_FLOW:
3951                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3952                 break;
3953         default:
3954                 return -EINVAL;
3955         }
3956
3957         if (!tuple_sets)
3958                 return 0;
3959
3960         if (tuple_sets & HCLGE_D_PORT_BIT)
3961                 nfc->data |= RXH_L4_B_2_3;
3962         if (tuple_sets & HCLGE_S_PORT_BIT)
3963                 nfc->data |= RXH_L4_B_0_1;
3964         if (tuple_sets & HCLGE_D_IP_BIT)
3965                 nfc->data |= RXH_IP_DST;
3966         if (tuple_sets & HCLGE_S_IP_BIT)
3967                 nfc->data |= RXH_IP_SRC;
3968
3969         return 0;
3970 }
3971
3972 static int hclge_get_tc_size(struct hnae3_handle *handle)
3973 {
3974         struct hclge_vport *vport = hclge_get_vport(handle);
3975         struct hclge_dev *hdev = vport->back;
3976
3977         return hdev->rss_size_max;
3978 }
3979
3980 int hclge_rss_init_hw(struct hclge_dev *hdev)
3981 {
3982         struct hclge_vport *vport = hdev->vport;
3983         u8 *rss_indir = vport[0].rss_indirection_tbl;
3984         u16 rss_size = vport[0].alloc_rss_size;
3985         u8 *key = vport[0].rss_hash_key;
3986         u8 hfunc = vport[0].rss_algo;
3987         u16 tc_offset[HCLGE_MAX_TC_NUM];
3988         u16 tc_valid[HCLGE_MAX_TC_NUM];
3989         u16 tc_size[HCLGE_MAX_TC_NUM];
3990         u16 roundup_size;
3991         int i, ret;
3992
3993         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3994         if (ret)
3995                 return ret;
3996
3997         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3998         if (ret)
3999                 return ret;
4000
4001         ret = hclge_set_rss_input_tuple(hdev);
4002         if (ret)
4003                 return ret;
4004
4005         /* Each TC has the same queue size, and the tc_size set to hardware is
4006          * the log2 of the roundup power of two of rss_size; the actual queue
4007          * size is limited by the indirection table.
4008          */
4009         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4010                 dev_err(&hdev->pdev->dev,
4011                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4012                         rss_size);
4013                 return -EINVAL;
4014         }
4015
4016         roundup_size = roundup_pow_of_two(rss_size);
4017         roundup_size = ilog2(roundup_size);
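        /* e.g. an rss_size of 24 is rounded up to 32, so tc_size becomes
         * ilog2(32) = 5
         */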
4018
4019         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4020                 tc_valid[i] = 0;
4021
4022                 if (!(hdev->hw_tc_map & BIT(i)))
4023                         continue;
4024
4025                 tc_valid[i] = 1;
4026                 tc_size[i] = roundup_size;
4027                 tc_offset[i] = rss_size * i;
4028         }
4029
4030         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4031 }
4032
4033 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4034 {
4035         struct hclge_vport *vport = hdev->vport;
4036         int i, j;
4037
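        /* the default indirection table simply spreads entries round-robin
         * across each vport's alloc_rss_size queues
         */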
4038         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4039                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4040                         vport[j].rss_indirection_tbl[i] =
4041                                 i % vport[j].alloc_rss_size;
4042         }
4043 }
4044
4045 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4046 {
4047         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4048         struct hclge_vport *vport = hdev->vport;
4049
4050         if (hdev->pdev->revision >= 0x21)
4051                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4052
4053         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4054                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4055                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4056                 vport[i].rss_tuple_sets.ipv4_udp_en =
4057                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4058                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4059                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4060                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4061                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4062                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4063                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4064                 vport[i].rss_tuple_sets.ipv6_udp_en =
4065                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4066                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4067                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4068                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4069                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4070
4071                 vport[i].rss_algo = rss_algo;
4072
4073                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4074                        HCLGE_RSS_KEY_SIZE);
4075         }
4076
4077         hclge_rss_indir_init_cfg(hdev);
4078 }
4079
4080 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4081                                 int vector_id, bool en,
4082                                 struct hnae3_ring_chain_node *ring_chain)
4083 {
4084         struct hclge_dev *hdev = vport->back;
4085         struct hnae3_ring_chain_node *node;
4086         struct hclge_desc desc;
4087         struct hclge_ctrl_vector_chain_cmd *req
4088                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4089         enum hclge_cmd_status status;
4090         enum hclge_opcode_type op;
4091         u16 tqp_type_and_id;
4092         int i;
4093
4094         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4095         hclge_cmd_setup_basic_desc(&desc, op, false);
4096         req->int_vector_id = vector_id;
4097
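        /* walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
         * entries into each descriptor; a full descriptor is sent immediately
         * and any trailing partial descriptor is flushed after the loop
         */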
4098         i = 0;
4099         for (node = ring_chain; node; node = node->next) {
4100                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4101                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4102                                 HCLGE_INT_TYPE_S,
4103                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4104                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4105                                 HCLGE_TQP_ID_S, node->tqp_index);
4106                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4107                                 HCLGE_INT_GL_IDX_S,
4108                                 hnae3_get_field(node->int_gl_idx,
4109                                                 HNAE3_RING_GL_IDX_M,
4110                                                 HNAE3_RING_GL_IDX_S));
4111                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4112                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4113                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4114                         req->vfid = vport->vport_id;
4115
4116                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4117                         if (status) {
4118                                 dev_err(&hdev->pdev->dev,
4119                                         "Map TQP fail, status is %d.\n",
4120                                         status);
4121                                 return -EIO;
4122                         }
4123                         i = 0;
4124
4125                         hclge_cmd_setup_basic_desc(&desc,
4126                                                    op,
4127                                                    false);
4128                         req->int_vector_id = vector_id;
4129                 }
4130         }
4131
4132         if (i > 0) {
4133                 req->int_cause_num = i;
4134                 req->vfid = vport->vport_id;
4135                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4136                 if (status) {
4137                         dev_err(&hdev->pdev->dev,
4138                                 "Map TQP fail, status is %d.\n", status);
4139                         return -EIO;
4140                 }
4141         }
4142
4143         return 0;
4144 }
4145
4146 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4147                                     int vector,
4148                                     struct hnae3_ring_chain_node *ring_chain)
4149 {
4150         struct hclge_vport *vport = hclge_get_vport(handle);
4151         struct hclge_dev *hdev = vport->back;
4152         int vector_id;
4153
4154         vector_id = hclge_get_vector_index(hdev, vector);
4155         if (vector_id < 0) {
4156                 dev_err(&hdev->pdev->dev,
4157                         "Get vector index fail. vector_id =%d\n", vector_id);
4158                 return vector_id;
4159         }
4160
4161         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4162 }
4163
4164 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4165                                        int vector,
4166                                        struct hnae3_ring_chain_node *ring_chain)
4167 {
4168         struct hclge_vport *vport = hclge_get_vport(handle);
4169         struct hclge_dev *hdev = vport->back;
4170         int vector_id, ret;
4171
4172         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4173                 return 0;
4174
4175         vector_id = hclge_get_vector_index(hdev, vector);
4176         if (vector_id < 0) {
4177                 dev_err(&handle->pdev->dev,
4178                         "Get vector index fail. ret =%d\n", vector_id);
4179                 return vector_id;
4180         }
4181
4182         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4183         if (ret)
4184                 dev_err(&handle->pdev->dev,
4185                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4186                         vector_id,
4187                         ret);
4188
4189         return ret;
4190 }
4191
4192 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4193                                struct hclge_promisc_param *param)
4194 {
4195         struct hclge_promisc_cfg_cmd *req;
4196         struct hclge_desc desc;
4197         int ret;
4198
4199         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4200
4201         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4202         req->vf_id = param->vf_id;
4203
4204         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4205          * pdev revision(0x20); newer revisions support them. Setting these
4206          * two fields does not cause an error when the driver sends the
4207          * command to the firmware on revision(0x20).
4208          */
4209         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4210                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4211
4212         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4213         if (ret)
4214                 dev_err(&hdev->pdev->dev,
4215                         "Set promisc mode fail, status is %d.\n", ret);
4216
4217         return ret;
4218 }
4219
4220 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4221                               bool en_mc, bool en_bc, int vport_id)
4222 {
4223         if (!param)
4224                 return;
4225
4226         memset(param, 0, sizeof(struct hclge_promisc_param));
4227         if (en_uc)
4228                 param->enable = HCLGE_PROMISC_EN_UC;
4229         if (en_mc)
4230                 param->enable |= HCLGE_PROMISC_EN_MC;
4231         if (en_bc)
4232                 param->enable |= HCLGE_PROMISC_EN_BC;
4233         param->vf_id = vport_id;
4234 }
4235
4236 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4237                                   bool en_mc_pmc)
4238 {
4239         struct hclge_vport *vport = hclge_get_vport(handle);
4240         struct hclge_dev *hdev = vport->back;
4241         struct hclge_promisc_param param;
4242         bool en_bc_pmc = true;
4243
4244         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4245          * is always bypassed. So broadcast promisc should be disabled until
4246          * the user enables promisc mode
4247          */
4248         if (handle->pdev->revision == 0x20)
4249                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4250
4251         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4252                                  vport->vport_id);
4253         return hclge_cmd_set_promisc_mode(hdev, &param);
4254 }
4255
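/* Flow director (fd) support starts here: hclge_get_fd_mode() reads the
 * configured fd mode from the firmware, which determines the key width
 * used by hclge_init_fd_config() below.
 */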
4256 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4257 {
4258         struct hclge_get_fd_mode_cmd *req;
4259         struct hclge_desc desc;
4260         int ret;
4261
4262         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4263
4264         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4265
4266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4267         if (ret) {
4268                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4269                 return ret;
4270         }
4271
4272         *fd_mode = req->mode;
4273
4274         return ret;
4275 }
4276
4277 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4278                                    u32 *stage1_entry_num,
4279                                    u32 *stage2_entry_num,
4280                                    u16 *stage1_counter_num,
4281                                    u16 *stage2_counter_num)
4282 {
4283         struct hclge_get_fd_allocation_cmd *req;
4284         struct hclge_desc desc;
4285         int ret;
4286
4287         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4288
4289         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4290
4291         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4292         if (ret) {
4293                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4294                         ret);
4295                 return ret;
4296         }
4297
4298         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4299         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4300         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4301         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4302
4303         return ret;
4304 }
4305
4306 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4307 {
4308         struct hclge_set_fd_key_config_cmd *req;
4309         struct hclge_fd_key_cfg *stage;
4310         struct hclge_desc desc;
4311         int ret;
4312
4313         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4314
4315         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4316         stage = &hdev->fd_cfg.key_cfg[stage_num];
4317         req->stage = stage_num;
4318         req->key_select = stage->key_sel;
4319         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4320         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4321         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4322         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4323         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4324         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4325
4326         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4327         if (ret)
4328                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4329
4330         return ret;
4331 }
4332
4333 static int hclge_init_fd_config(struct hclge_dev *hdev)
4334 {
4335 #define LOW_2_WORDS             0x03
4336         struct hclge_fd_key_cfg *key_cfg;
4337         int ret;
4338
4339         if (!hnae3_dev_fd_supported(hdev))
4340                 return 0;
4341
4342         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4343         if (ret)
4344                 return ret;
4345
4346         switch (hdev->fd_cfg.fd_mode) {
4347         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4348                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4349                 break;
4350         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4351                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4352                 break;
4353         default:
4354                 dev_err(&hdev->pdev->dev,
4355                         "Unsupported flow director mode %d\n",
4356                         hdev->fd_cfg.fd_mode);
4357                 return -EOPNOTSUPP;
4358         }
4359
4360         hdev->fd_cfg.proto_support =
4361                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4362                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4363         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4364         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4365         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4366         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4367         key_cfg->outer_sipv6_word_en = 0;
4368         key_cfg->outer_dipv6_word_en = 0;
4369
4370         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4371                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4372                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4373                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4374
4375         /* If the max 400-bit key is used, tuples for ether type can be supported */
4376         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4377                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4378                 key_cfg->tuple_active |=
4379                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4380         }
4381
4382         /* roce_type is used to filter out roce frames
4383          * dst_vport is used to specify the target vport of the rule
4384          */
4385         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4386
4387         ret = hclge_get_fd_allocation(hdev,
4388                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4389                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4390                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4391                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4392         if (ret)
4393                 return ret;
4394
4395         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4396 }
4397
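/* Write (or invalidate) one TCAM entry. The key is wider than a single
 * command descriptor, so it is spread over three descriptors chained with
 * HCLGE_CMD_FLAG_NEXT.
 */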
4398 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4399                                 int loc, u8 *key, bool is_add)
4400 {
4401         struct hclge_fd_tcam_config_1_cmd *req1;
4402         struct hclge_fd_tcam_config_2_cmd *req2;
4403         struct hclge_fd_tcam_config_3_cmd *req3;
4404         struct hclge_desc desc[3];
4405         int ret;
4406
4407         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4408         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4409         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4410         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4411         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4412
4413         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4414         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4415         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4416
4417         req1->stage = stage;
4418         req1->xy_sel = sel_x ? 1 : 0;
4419         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4420         req1->index = cpu_to_le32(loc);
4421         req1->entry_vld = sel_x ? is_add : 0;
4422
4423         if (key) {
4424                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4425                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4426                        sizeof(req2->tcam_data));
4427                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4428                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4429         }
4430
4431         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4432         if (ret)
4433                 dev_err(&hdev->pdev->dev,
4434                         "config tcam key fail, ret=%d\n",
4435                         ret);
4436
4437         return ret;
4438 }
4439
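/* Program the action data (ad) of a rule. The rule id and its write-back
 * flag are packed into the upper 32 bits of ad_data, and the drop/queue,
 * counter and next-stage fields into the lower 32 bits.
 */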
4440 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4441                               struct hclge_fd_ad_data *action)
4442 {
4443         struct hclge_fd_ad_config_cmd *req;
4444         struct hclge_desc desc;
4445         u64 ad_data = 0;
4446         int ret;
4447
4448         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4449
4450         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4451         req->index = cpu_to_le32(loc);
4452         req->stage = stage;
4453
4454         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4455                       action->write_rule_id_to_bd);
4456         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4457                         action->rule_id);
4458         ad_data <<= 32;
4459         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4460         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4461                       action->forward_to_direct_queue);
4462         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4463                         action->queue_id);
4464         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4465         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4466                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4467         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4468         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4469                         action->counter_id);
4470
4471         req->ad_data = cpu_to_le64(ad_data);
4472         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4473         if (ret)
4474                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4475
4476         return ret;
4477 }
4478
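/* Convert one tuple of a rule into its TCAM x/y key pair via the
 * calc_x()/calc_y() helpers. Returns true when the tuple occupies space in
 * the key (so the caller advances its key cursor), false when the tuple is
 * not active.
 */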
4479 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4480                                    struct hclge_fd_rule *rule)
4481 {
4482         u16 tmp_x_s, tmp_y_s;
4483         u32 tmp_x_l, tmp_y_l;
4484         int i;
4485
4486         if (rule->unused_tuple & tuple_bit)
4487                 return true;
4488
4489         switch (tuple_bit) {
4490         case 0:
4491                 return false;
4492         case BIT(INNER_DST_MAC):
4493                 for (i = 0; i < 6; i++) {
4494                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4495                                rule->tuples_mask.dst_mac[i]);
4496                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4497                                rule->tuples_mask.dst_mac[i]);
4498                 }
4499
4500                 return true;
4501         case BIT(INNER_SRC_MAC):
4502                 for (i = 0; i < 6; i++) {
4503                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4504                                rule->tuples_mask.src_mac[i]);
4505                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4506                                rule->tuples_mask.src_mac[i]);
4507                 }
4508
4509                 return true;
4510         case BIT(INNER_VLAN_TAG_FST):
4511                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4512                        rule->tuples_mask.vlan_tag1);
4513                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4514                        rule->tuples_mask.vlan_tag1);
4515                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4516                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4517
4518                 return true;
4519         case BIT(INNER_ETH_TYPE):
4520                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4521                        rule->tuples_mask.ether_proto);
4522                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4523                        rule->tuples_mask.ether_proto);
4524                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4525                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4526
4527                 return true;
4528         case BIT(INNER_IP_TOS):
4529                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4530                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4531
4532                 return true;
4533         case BIT(INNER_IP_PROTO):
4534                 calc_x(*key_x, rule->tuples.ip_proto,
4535                        rule->tuples_mask.ip_proto);
4536                 calc_y(*key_y, rule->tuples.ip_proto,
4537                        rule->tuples_mask.ip_proto);
4538
4539                 return true;
4540         case BIT(INNER_SRC_IP):
4541                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4542                        rule->tuples_mask.src_ip[3]);
4543                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4544                        rule->tuples_mask.src_ip[3]);
4545                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4546                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4547
4548                 return true;
4549         case BIT(INNER_DST_IP):
4550                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4551                        rule->tuples_mask.dst_ip[3]);
4552                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4553                        rule->tuples_mask.dst_ip[3]);
4554                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4555                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4556
4557                 return true;
4558         case BIT(INNER_SRC_PORT):
4559                 calc_x(tmp_x_s, rule->tuples.src_port,
4560                        rule->tuples_mask.src_port);
4561                 calc_y(tmp_y_s, rule->tuples.src_port,
4562                        rule->tuples_mask.src_port);
4563                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4564                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4565
4566                 return true;
4567         case BIT(INNER_DST_PORT):
4568                 calc_x(tmp_x_s, rule->tuples.dst_port,
4569                        rule->tuples_mask.dst_port);
4570                 calc_y(tmp_y_s, rule->tuples.dst_port,
4571                        rule->tuples_mask.dst_port);
4572                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4573                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4574
4575                 return true;
4576         default:
4577                 return false;
4578         }
4579 }
4580
4581 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4582                                  u8 vf_id, u8 network_port_id)
4583 {
4584         u32 port_number = 0;
4585
4586         if (port_type == HOST_PORT) {
4587                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4588                                 pf_id);
4589                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4590                                 vf_id);
4591                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4592         } else {
4593                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4594                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4595                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4596         }
4597
4598         return port_number;
4599 }
4600
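/* Build the meta data part of the key: the packet type (NIC) and the
 * destination vport number are packed together, then shifted so that they
 * are aligned to the MSB of the 32-bit meta data word.
 */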
4601 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4602                                        __le32 *key_x, __le32 *key_y,
4603                                        struct hclge_fd_rule *rule)
4604 {
4605         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4606         u8 cur_pos = 0, tuple_size, shift_bits;
4607         int i;
4608
4609         for (i = 0; i < MAX_META_DATA; i++) {
4610                 tuple_size = meta_data_key_info[i].key_length;
4611                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4612
4613                 switch (tuple_bit) {
4614                 case BIT(ROCE_TYPE):
4615                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4616                         cur_pos += tuple_size;
4617                         break;
4618                 case BIT(DST_VPORT):
4619                         port_number = hclge_get_port_number(HOST_PORT, 0,
4620                                                             rule->vf_id, 0);
4621                         hnae3_set_field(meta_data,
4622                                         GENMASK(cur_pos + tuple_size, cur_pos),
4623                                         cur_pos, port_number);
4624                         cur_pos += tuple_size;
4625                         break;
4626                 default:
4627                         break;
4628                 }
4629         }
4630
4631         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4632         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4633         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4634
4635         *key_x = cpu_to_le32(tmp_x << shift_bits);
4636         *key_y = cpu_to_le32(tmp_y << shift_bits);
4637 }
4638
4639 /* A complete key consists of a meta data key and a tuple key.
4640  * The meta data key is stored in the MSB region, the tuple key is stored
4641  * in the LSB region, and unused bits are filled with 0.
4642  */
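/* For example, assuming MAX_KEY_LENGTH is 400 bits and MAX_META_DATA_LENGTH
 * is 32 bits (values taken from the driver's definitions elsewhere),
 * meta_data_region below is 400 / 8 - 32 / 8 = 46, i.e. the meta data key
 * occupies bytes 46..49 of the key buffer and the tuple key bytes 0..45.
 */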
4643 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4644                             struct hclge_fd_rule *rule)
4645 {
4646         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4647         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4648         u8 *cur_key_x, *cur_key_y;
4649         int i, ret, tuple_size;
4650         u8 meta_data_region;
4651
4652         memset(key_x, 0, sizeof(key_x));
4653         memset(key_y, 0, sizeof(key_y));
4654         cur_key_x = key_x;
4655         cur_key_y = key_y;
4656
4657         for (i = 0; i < MAX_TUPLE; i++) {
4658                 bool tuple_valid;
4659                 u32 check_tuple;
4660
4661                 tuple_size = tuple_key_info[i].key_length / 8;
4662                 check_tuple = key_cfg->tuple_active & BIT(i);
4663
4664                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4665                                                      cur_key_y, rule);
4666                 if (tuple_valid) {
4667                         cur_key_x += tuple_size;
4668                         cur_key_y += tuple_size;
4669                 }
4670         }
4671
4672         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4673                         MAX_META_DATA_LENGTH / 8;
4674
4675         hclge_fd_convert_meta_data(key_cfg,
4676                                    (__le32 *)(key_x + meta_data_region),
4677                                    (__le32 *)(key_y + meta_data_region),
4678                                    rule);
4679
4680         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4681                                    true);
4682         if (ret) {
4683                 dev_err(&hdev->pdev->dev,
4684                         "fd key_y config fail, loc=%d, ret=%d\n",
4685                         rule->location, ret);
4686                 return ret;
4687         }
4688
4689         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4690                                    true);
4691         if (ret)
4692                 dev_err(&hdev->pdev->dev,
4693                         "fd key_x config fail, loc=%d, ret=%d\n",
4694                         rule->location, ret);
4695         return ret;
4696 }
4697
4698 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4699                                struct hclge_fd_rule *rule)
4700 {
4701         struct hclge_fd_ad_data ad_data;
4702
4703         ad_data.ad_id = rule->location;
4704
4705         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4706                 ad_data.drop_packet = true;
4707                 ad_data.forward_to_direct_queue = false;
4708                 ad_data.queue_id = 0;
4709         } else {
4710                 ad_data.drop_packet = false;
4711                 ad_data.forward_to_direct_queue = true;
4712                 ad_data.queue_id = rule->queue_id;
4713         }
4714
4715         ad_data.use_counter = false;
4716         ad_data.counter_id = 0;
4717
4718         ad_data.use_next_stage = false;
4719         ad_data.next_input_key = 0;
4720
4721         ad_data.write_rule_id_to_bd = true;
4722         ad_data.rule_id = rule->location;
4723
4724         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4725 }
4726
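/* Validate an ethtool flow spec: check the rule location and flow type
 * against what the hardware supports, and record every tuple the user left
 * unspecified in *unused so it can be masked out of the key.
 */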
4727 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4728                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4729 {
4730         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4731         struct ethtool_usrip4_spec *usr_ip4_spec;
4732         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4733         struct ethtool_usrip6_spec *usr_ip6_spec;
4734         struct ethhdr *ether_spec;
4735
4736         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4737                 return -EINVAL;
4738
4739         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4740                 return -EOPNOTSUPP;
4741
4742         if ((fs->flow_type & FLOW_EXT) &&
4743             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4744                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4745                 return -EOPNOTSUPP;
4746         }
4747
4748         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4749         case SCTP_V4_FLOW:
4750         case TCP_V4_FLOW:
4751         case UDP_V4_FLOW:
4752                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4753                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4754
4755                 if (!tcp_ip4_spec->ip4src)
4756                         *unused |= BIT(INNER_SRC_IP);
4757
4758                 if (!tcp_ip4_spec->ip4dst)
4759                         *unused |= BIT(INNER_DST_IP);
4760
4761                 if (!tcp_ip4_spec->psrc)
4762                         *unused |= BIT(INNER_SRC_PORT);
4763
4764                 if (!tcp_ip4_spec->pdst)
4765                         *unused |= BIT(INNER_DST_PORT);
4766
4767                 if (!tcp_ip4_spec->tos)
4768                         *unused |= BIT(INNER_IP_TOS);
4769
4770                 break;
4771         case IP_USER_FLOW:
4772                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4773                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4774                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4775
4776                 if (!usr_ip4_spec->ip4src)
4777                         *unused |= BIT(INNER_SRC_IP);
4778
4779                 if (!usr_ip4_spec->ip4dst)
4780                         *unused |= BIT(INNER_DST_IP);
4781
4782                 if (!usr_ip4_spec->tos)
4783                         *unused |= BIT(INNER_IP_TOS);
4784
4785                 if (!usr_ip4_spec->proto)
4786                         *unused |= BIT(INNER_IP_PROTO);
4787
4788                 if (usr_ip4_spec->l4_4_bytes)
4789                         return -EOPNOTSUPP;
4790
4791                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4792                         return -EOPNOTSUPP;
4793
4794                 break;
4795         case SCTP_V6_FLOW:
4796         case TCP_V6_FLOW:
4797         case UDP_V6_FLOW:
4798                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4799                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4800                         BIT(INNER_IP_TOS);
4801
4802                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4803                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4804                         *unused |= BIT(INNER_SRC_IP);
4805
4806                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4807                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4808                         *unused |= BIT(INNER_DST_IP);
4809
4810                 if (!tcp_ip6_spec->psrc)
4811                         *unused |= BIT(INNER_SRC_PORT);
4812
4813                 if (!tcp_ip6_spec->pdst)
4814                         *unused |= BIT(INNER_DST_PORT);
4815
4816                 if (tcp_ip6_spec->tclass)
4817                         return -EOPNOTSUPP;
4818
4819                 break;
4820         case IPV6_USER_FLOW:
4821                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4822                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4823                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4824                         BIT(INNER_DST_PORT);
4825
4826                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4827                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4828                         *unused |= BIT(INNER_SRC_IP);
4829
4830                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4831                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4832                         *unused |= BIT(INNER_DST_IP);
4833
4834                 if (!usr_ip6_spec->l4_proto)
4835                         *unused |= BIT(INNER_IP_PROTO);
4836
4837                 if (usr_ip6_spec->tclass)
4838                         return -EOPNOTSUPP;
4839
4840                 if (usr_ip6_spec->l4_4_bytes)
4841                         return -EOPNOTSUPP;
4842
4843                 break;
4844         case ETHER_FLOW:
4845                 ether_spec = &fs->h_u.ether_spec;
4846                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4847                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4848                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4849
4850                 if (is_zero_ether_addr(ether_spec->h_source))
4851                         *unused |= BIT(INNER_SRC_MAC);
4852
4853                 if (is_zero_ether_addr(ether_spec->h_dest))
4854                         *unused |= BIT(INNER_DST_MAC);
4855
4856                 if (!ether_spec->h_proto)
4857                         *unused |= BIT(INNER_ETH_TYPE);
4858
4859                 break;
4860         default:
4861                 return -EOPNOTSUPP;
4862         }
4863
4864         if ((fs->flow_type & FLOW_EXT)) {
4865                 if (fs->h_ext.vlan_etype)
4866                         return -EOPNOTSUPP;
4867                 if (!fs->h_ext.vlan_tci)
4868                         *unused |= BIT(INNER_VLAN_TAG_FST);
4869
4870                 if (fs->m_ext.vlan_tci) {
4871                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4872                                 return -EINVAL;
4873                 }
4874         } else {
4875                 *unused |= BIT(INNER_VLAN_TAG_FST);
4876         }
4877
4878         if (fs->flow_type & FLOW_MAC_EXT) {
4879                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4880                         return -EOPNOTSUPP;
4881
4882                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4883                         *unused |= BIT(INNER_DST_MAC);
4884                 else
4885                         *unused &= ~(BIT(INNER_DST_MAC));
4886         }
4887
4888         return 0;
4889 }
4890
4891 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4892 {
4893         struct hclge_fd_rule *rule = NULL;
4894         struct hlist_node *node2;
4895
4896         spin_lock_bh(&hdev->fd_rule_lock);
4897         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4898                 if (rule->location >= location)
4899                         break;
4900         }
4901
4902         spin_unlock_bh(&hdev->fd_rule_lock);
4903
4904         return rule && rule->location == location;
4905 }
4906
4907 /* the caller must hold fd_rule_lock before calling this function */
4908 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4909                                      struct hclge_fd_rule *new_rule,
4910                                      u16 location,
4911                                      bool is_add)
4912 {
4913         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4914         struct hlist_node *node2;
4915
4916         if (is_add && !new_rule)
4917                 return -EINVAL;
4918
4919         hlist_for_each_entry_safe(rule, node2,
4920                                   &hdev->fd_rule_list, rule_node) {
4921                 if (rule->location >= location)
4922                         break;
4923                 parent = rule;
4924         }
4925
4926         if (rule && rule->location == location) {
4927                 hlist_del(&rule->rule_node);
4928                 kfree(rule);
4929                 hdev->hclge_fd_rule_num--;
4930
4931                 if (!is_add) {
4932                         if (!hdev->hclge_fd_rule_num)
4933                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4934                         clear_bit(location, hdev->fd_bmap);
4935
4936                         return 0;
4937                 }
4938         } else if (!is_add) {
4939                 dev_err(&hdev->pdev->dev,
4940                         "delete fail, rule %d is inexistent\n",
4941                         location);
4942                 return -EINVAL;
4943         }
4944
4945         INIT_HLIST_NODE(&new_rule->rule_node);
4946
4947         if (parent)
4948                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4949         else
4950                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4951
4952         set_bit(location, hdev->fd_bmap);
4953         hdev->hclge_fd_rule_num++;
4954         hdev->fd_active_type = new_rule->rule_type;
4955
4956         return 0;
4957 }
4958
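/* Copy the ethtool flow spec fields into the driver's rule tuples,
 * converting from big endian to host byte order where needed.
 */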
4959 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4960                               struct ethtool_rx_flow_spec *fs,
4961                               struct hclge_fd_rule *rule)
4962 {
4963         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4964
4965         switch (flow_type) {
4966         case SCTP_V4_FLOW:
4967         case TCP_V4_FLOW:
4968         case UDP_V4_FLOW:
4969                 rule->tuples.src_ip[3] =
4970                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4971                 rule->tuples_mask.src_ip[3] =
4972                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4973
4974                 rule->tuples.dst_ip[3] =
4975                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4976                 rule->tuples_mask.dst_ip[3] =
4977                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4978
4979                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4980                 rule->tuples_mask.src_port =
4981                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4982
4983                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4984                 rule->tuples_mask.dst_port =
4985                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4986
4987                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4988                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4989
4990                 rule->tuples.ether_proto = ETH_P_IP;
4991                 rule->tuples_mask.ether_proto = 0xFFFF;
4992
4993                 break;
4994         case IP_USER_FLOW:
4995                 rule->tuples.src_ip[3] =
4996                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4997                 rule->tuples_mask.src_ip[3] =
4998                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4999
5000                 rule->tuples.dst_ip[3] =
5001                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5002                 rule->tuples_mask.dst_ip[3] =
5003                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5004
5005                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5006                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5007
5008                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5009                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5010
5011                 rule->tuples.ether_proto = ETH_P_IP;
5012                 rule->tuples_mask.ether_proto = 0xFFFF;
5013
5014                 break;
5015         case SCTP_V6_FLOW:
5016         case TCP_V6_FLOW:
5017         case UDP_V6_FLOW:
5018                 be32_to_cpu_array(rule->tuples.src_ip,
5019                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5020                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5021                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5022
5023                 be32_to_cpu_array(rule->tuples.dst_ip,
5024                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5025                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5026                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5027
5028                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5029                 rule->tuples_mask.src_port =
5030                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5031
5032                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5033                 rule->tuples_mask.dst_port =
5034                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5035
5036                 rule->tuples.ether_proto = ETH_P_IPV6;
5037                 rule->tuples_mask.ether_proto = 0xFFFF;
5038
5039                 break;
5040         case IPV6_USER_FLOW:
5041                 be32_to_cpu_array(rule->tuples.src_ip,
5042                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5043                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5044                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5045
5046                 be32_to_cpu_array(rule->tuples.dst_ip,
5047                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5048                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5049                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5050
5051                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5052                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5053
5054                 rule->tuples.ether_proto = ETH_P_IPV6;
5055                 rule->tuples_mask.ether_proto = 0xFFFF;
5056
5057                 break;
5058         case ETHER_FLOW:
5059                 ether_addr_copy(rule->tuples.src_mac,
5060                                 fs->h_u.ether_spec.h_source);
5061                 ether_addr_copy(rule->tuples_mask.src_mac,
5062                                 fs->m_u.ether_spec.h_source);
5063
5064                 ether_addr_copy(rule->tuples.dst_mac,
5065                                 fs->h_u.ether_spec.h_dest);
5066                 ether_addr_copy(rule->tuples_mask.dst_mac,
5067                                 fs->m_u.ether_spec.h_dest);
5068
5069                 rule->tuples.ether_proto =
5070                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5071                 rule->tuples_mask.ether_proto =
5072                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5073
5074                 break;
5075         default:
5076                 return -EOPNOTSUPP;
5077         }
5078
5079         switch (flow_type) {
5080         case SCTP_V4_FLOW:
5081         case SCTP_V6_FLOW:
5082                 rule->tuples.ip_proto = IPPROTO_SCTP;
5083                 rule->tuples_mask.ip_proto = 0xFF;
5084                 break;
5085         case TCP_V4_FLOW:
5086         case TCP_V6_FLOW:
5087                 rule->tuples.ip_proto = IPPROTO_TCP;
5088                 rule->tuples_mask.ip_proto = 0xFF;
5089                 break;
5090         case UDP_V4_FLOW:
5091         case UDP_V6_FLOW:
5092                 rule->tuples.ip_proto = IPPROTO_UDP;
5093                 rule->tuples_mask.ip_proto = 0xFF;
5094                 break;
5095         default:
5096                 break;
5097         }
5098
5099         if ((fs->flow_type & FLOW_EXT)) {
5100                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5101                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5102         }
5103
5104         if (fs->flow_type & FLOW_MAC_EXT) {
5105                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5106                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5107         }
5108
5109         return 0;
5110 }
5111
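/* Add the rule to the rule list, then program its action and key into
 * hardware; the rule is removed from the list again if either step fails.
 */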
5112 /* the caller must hold fd_rule_lock before calling this function */
5113 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5114                                 struct hclge_fd_rule *rule)
5115 {
5116         int ret;
5117
5118         if (!rule) {
5119                 dev_err(&hdev->pdev->dev,
5120                         "The flow director rule is NULL\n");
5121                 return -EINVAL;
5122         }
5123
5124         /* it will never fail here, so there is no need to check the return value */
5125         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5126
5127         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5128         if (ret)
5129                 goto clear_rule;
5130
5131         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5132         if (ret)
5133                 goto clear_rule;
5134
5135         return 0;
5136
5137 clear_rule:
5138         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5139         return ret;
5140 }
5141
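/* Entry point for adding a flow director rule from ethtool: validate the
 * spec, resolve the destination vport and queue, build the rule and
 * program it under fd_rule_lock.
 */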
5142 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5143                               struct ethtool_rxnfc *cmd)
5144 {
5145         struct hclge_vport *vport = hclge_get_vport(handle);
5146         struct hclge_dev *hdev = vport->back;
5147         u16 dst_vport_id = 0, q_index = 0;
5148         struct ethtool_rx_flow_spec *fs;
5149         struct hclge_fd_rule *rule;
5150         u32 unused = 0;
5151         u8 action;
5152         int ret;
5153
5154         if (!hnae3_dev_fd_supported(hdev))
5155                 return -EOPNOTSUPP;
5156
5157         if (!hdev->fd_en) {
5158                 dev_warn(&hdev->pdev->dev,
5159                          "Please enable flow director first\n");
5160                 return -EOPNOTSUPP;
5161         }
5162
5163         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5164
5165         ret = hclge_fd_check_spec(hdev, fs, &unused);
5166         if (ret) {
5167                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5168                 return ret;
5169         }
5170
5171         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5172                 action = HCLGE_FD_ACTION_DROP_PACKET;
5173         } else {
5174                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5175                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5176                 u16 tqps;
5177
5178                 if (vf > hdev->num_req_vfs) {
5179                         dev_err(&hdev->pdev->dev,
5180                                 "Error: vf id (%d) > max vf num (%d)\n",
5181                                 vf, hdev->num_req_vfs);
5182                         return -EINVAL;
5183                 }
5184
5185                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5186                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5187
5188                 if (ring >= tqps) {
5189                         dev_err(&hdev->pdev->dev,
5190                                 "Error: queue id (%d) > max tqp num (%d)\n",
5191                                 ring, tqps - 1);
5192                         return -EINVAL;
5193                 }
5194
5195                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5196                 q_index = ring;
5197         }
5198
5199         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5200         if (!rule)
5201                 return -ENOMEM;
5202
5203         ret = hclge_fd_get_tuple(hdev, fs, rule);
5204         if (ret) {
5205                 kfree(rule);
5206                 return ret;
5207         }
5208
5209         rule->flow_type = fs->flow_type;
5210
5211         rule->location = fs->location;
5212         rule->unused_tuple = unused;
5213         rule->vf_id = dst_vport_id;
5214         rule->queue_id = q_index;
5215         rule->action = action;
5216         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5217
5218         /* to avoid rule conflict, when the user configures a rule via
5219          * ethtool, all arfs rules need to be cleared
5220          */
5221         hclge_clear_arfs_rules(handle);
5222
5223         spin_lock_bh(&hdev->fd_rule_lock);
5224         ret = hclge_fd_config_rule(hdev, rule);
5225
5226         spin_unlock_bh(&hdev->fd_rule_lock);
5227
5228         return ret;
5229 }
5230
5231 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5232                               struct ethtool_rxnfc *cmd)
5233 {
5234         struct hclge_vport *vport = hclge_get_vport(handle);
5235         struct hclge_dev *hdev = vport->back;
5236         struct ethtool_rx_flow_spec *fs;
5237         int ret;
5238
5239         if (!hnae3_dev_fd_supported(hdev))
5240                 return -EOPNOTSUPP;
5241
5242         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5243
5244         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5245                 return -EINVAL;
5246
5247         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5248                 dev_err(&hdev->pdev->dev,
5249                         "Delete fail, rule %d is inexistent\n",
5250                         fs->location);
5251                 return -ENOENT;
5252         }
5253
5254         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5255                                    fs->location, NULL, false);
5256         if (ret)
5257                 return ret;
5258
5259         spin_lock_bh(&hdev->fd_rule_lock);
5260         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5261
5262         spin_unlock_bh(&hdev->fd_rule_lock);
5263
5264         return ret;
5265 }
5266
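/* Invalidate every TCAM entry tracked in fd_bmap; when clear_list is true,
 * also free the software rule list and reset the rule counters.
 */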
5267 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5268                                      bool clear_list)
5269 {
5270         struct hclge_vport *vport = hclge_get_vport(handle);
5271         struct hclge_dev *hdev = vport->back;
5272         struct hclge_fd_rule *rule;
5273         struct hlist_node *node;
5274         u16 location;
5275
5276         if (!hnae3_dev_fd_supported(hdev))
5277                 return;
5278
5279         spin_lock_bh(&hdev->fd_rule_lock);
5280         for_each_set_bit(location, hdev->fd_bmap,
5281                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5282                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5283                                      NULL, false);
5284
5285         if (clear_list) {
5286                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5287                                           rule_node) {
5288                         hlist_del(&rule->rule_node);
5289                         kfree(rule);
5290                 }
5291                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5292                 hdev->hclge_fd_rule_num = 0;
5293                 bitmap_zero(hdev->fd_bmap,
5294                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5295         }
5296
5297         spin_unlock_bh(&hdev->fd_rule_lock);
5298 }
5299
5300 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5301 {
5302         struct hclge_vport *vport = hclge_get_vport(handle);
5303         struct hclge_dev *hdev = vport->back;
5304         struct hclge_fd_rule *rule;
5305         struct hlist_node *node;
5306         int ret;
5307
5308         /* Return ok here, because the reset error handling will check this
5309          * return value. If an error is returned here, the reset process
5310          * will fail.
5311          */
5312         if (!hnae3_dev_fd_supported(hdev))
5313                 return 0;
5314
5315         /* if fd is disabled, it should not be restored during reset */
5316         if (!hdev->fd_en)
5317                 return 0;
5318
5319         spin_lock_bh(&hdev->fd_rule_lock);
5320         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5321                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5322                 if (!ret)
5323                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5324
5325                 if (ret) {
5326                         dev_warn(&hdev->pdev->dev,
5327                                  "Restore rule %d failed, remove it\n",
5328                                  rule->location);
5329                         clear_bit(rule->location, hdev->fd_bmap);
5330                         hlist_del(&rule->rule_node);
5331                         kfree(rule);
5332                         hdev->hclge_fd_rule_num--;
5333                 }
5334         }
5335
5336         if (hdev->hclge_fd_rule_num)
5337                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5338
5339         spin_unlock_bh(&hdev->fd_rule_lock);
5340
5341         return 0;
5342 }
5343
5344 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5345                                  struct ethtool_rxnfc *cmd)
5346 {
5347         struct hclge_vport *vport = hclge_get_vport(handle);
5348         struct hclge_dev *hdev = vport->back;
5349
5350         if (!hnae3_dev_fd_supported(hdev))
5351                 return -EOPNOTSUPP;
5352
5353         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5354         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5355
5356         return 0;
5357 }
5358
5359 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5360                                   struct ethtool_rxnfc *cmd)
5361 {
5362         struct hclge_vport *vport = hclge_get_vport(handle);
5363         struct hclge_fd_rule *rule = NULL;
5364         struct hclge_dev *hdev = vport->back;
5365         struct ethtool_rx_flow_spec *fs;
5366         struct hlist_node *node2;
5367
5368         if (!hnae3_dev_fd_supported(hdev))
5369                 return -EOPNOTSUPP;
5370
5371         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5372
5373         spin_lock_bh(&hdev->fd_rule_lock);
5374
5375         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5376                 if (rule->location >= fs->location)
5377                         break;
5378         }
5379
5380         if (!rule || fs->location != rule->location) {
5381                 spin_unlock_bh(&hdev->fd_rule_lock);
5382
5383                 return -ENOENT;
5384         }
5385
5386         fs->flow_type = rule->flow_type;
5387         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5388         case SCTP_V4_FLOW:
5389         case TCP_V4_FLOW:
5390         case UDP_V4_FLOW:
5391                 fs->h_u.tcp_ip4_spec.ip4src =
5392                                 cpu_to_be32(rule->tuples.src_ip[3]);
5393                 fs->m_u.tcp_ip4_spec.ip4src =
5394                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5395                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5396
5397                 fs->h_u.tcp_ip4_spec.ip4dst =
5398                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5399                 fs->m_u.tcp_ip4_spec.ip4dst =
5400                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5401                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5402
5403                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5404                 fs->m_u.tcp_ip4_spec.psrc =
5405                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5406                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5407
5408                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5409                 fs->m_u.tcp_ip4_spec.pdst =
5410                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5411                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5412
5413                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5414                 fs->m_u.tcp_ip4_spec.tos =
5415                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5416                                 0 : rule->tuples_mask.ip_tos;
5417
5418                 break;
5419         case IP_USER_FLOW:
5420                 fs->h_u.usr_ip4_spec.ip4src =
5421                                 cpu_to_be32(rule->tuples.src_ip[3]);
5422                 fs->m_u.usr_ip4_spec.ip4src =
5423                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5425
5426                 fs->h_u.usr_ip4_spec.ip4dst =
5427                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5428                 fs->m_u.usr_ip4_spec.ip4dst =
5429                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5431
5432                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5433                 fs->m_u.usr_ip4_spec.tos =
5434                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5435                                 0 : rule->tuples_mask.ip_tos;
5436
5437                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5438                 fs->m_u.usr_ip4_spec.proto =
5439                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5440                                 0 : rule->tuples_mask.ip_proto;
5441
5442                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5443
5444                 break;
5445         case SCTP_V6_FLOW:
5446         case TCP_V6_FLOW:
5447         case UDP_V6_FLOW:
5448                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5449                                   rule->tuples.src_ip, 4);
5450                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5451                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5452                 else
5453                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5454                                           rule->tuples_mask.src_ip, 4);
5455
5456                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5457                                   rule->tuples.dst_ip, 4);
5458                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5459                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5460                 else
5461                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5462                                           rule->tuples_mask.dst_ip, 4);
5463
5464                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5465                 fs->m_u.tcp_ip6_spec.psrc =
5466                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5467                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5468
5469                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5470                 fs->m_u.tcp_ip6_spec.pdst =
5471                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5472                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5473
5474                 break;
5475         case IPV6_USER_FLOW:
5476                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5477                                   rule->tuples.src_ip, 4);
5478                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5479                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5480                 else
5481                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5482                                           rule->tuples_mask.src_ip, 4);
5483
5484                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5485                                   rule->tuples.dst_ip, 4);
5486                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5487                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5488                 else
5489                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5490                                           rule->tuples_mask.dst_ip, 4);
5491
5492                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5493                 fs->m_u.usr_ip6_spec.l4_proto =
5494                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5495                                 0 : rule->tuples_mask.ip_proto;
5496
5497                 break;
5498         case ETHER_FLOW:
5499                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5500                                 rule->tuples.src_mac);
5501                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5502                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5503                 else
5504                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5505                                         rule->tuples_mask.src_mac);
5506
5507                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5508                                 rule->tuples.dst_mac);
5509                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5510                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5511                 else
5512                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5513                                         rule->tuples_mask.dst_mac);
5514
5515                 fs->h_u.ether_spec.h_proto =
5516                                 cpu_to_be16(rule->tuples.ether_proto);
5517                 fs->m_u.ether_spec.h_proto =
5518                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5519                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5520
5521                 break;
5522         default:
5523                 spin_unlock_bh(&hdev->fd_rule_lock);
5524                 return -EOPNOTSUPP;
5525         }
5526
5527         if (fs->flow_type & FLOW_EXT) {
5528                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5529                 fs->m_ext.vlan_tci =
5530                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5531                                 cpu_to_be16(VLAN_VID_MASK) :
5532                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5533         }
5534
5535         if (fs->flow_type & FLOW_MAC_EXT) {
5536                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5537                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5538                         eth_zero_addr(fs->m_ext.h_dest);
5539                 else
5540                         ether_addr_copy(fs->m_ext.h_dest,
5541                                         rule->tuples_mask.dst_mac);
5542         }
5543
5544         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5545                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5546         } else {
5547                 u64 vf_id;
5548
5549                 fs->ring_cookie = rule->queue_id;
5550                 vf_id = rule->vf_id;
5551                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5552                 fs->ring_cookie |= vf_id;
5553         }
5554
5555         spin_unlock_bh(&hdev->fd_rule_lock);
5556
5557         return 0;
5558 }
5559
5560 static int hclge_get_all_rules(struct hnae3_handle *handle,
5561                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5562 {
5563         struct hclge_vport *vport = hclge_get_vport(handle);
5564         struct hclge_dev *hdev = vport->back;
5565         struct hclge_fd_rule *rule;
5566         struct hlist_node *node2;
5567         int cnt = 0;
5568
5569         if (!hnae3_dev_fd_supported(hdev))
5570                 return -EOPNOTSUPP;
5571
5572         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5573
5574         spin_lock_bh(&hdev->fd_rule_lock);
5575         hlist_for_each_entry_safe(rule, node2,
5576                                   &hdev->fd_rule_list, rule_node) {
5577                 if (cnt == cmd->rule_cnt) {
5578                         spin_unlock_bh(&hdev->fd_rule_lock);
5579                         return -EMSGSIZE;
5580                 }
5581
5582                 rule_locs[cnt] = rule->location;
5583                 cnt++;
5584         }
5585
5586         spin_unlock_bh(&hdev->fd_rule_lock);
5587
5588         cmd->rule_cnt = cnt;
5589
5590         return 0;
5591 }
5592
5593 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5594                                      struct hclge_fd_rule_tuples *tuples)
5595 {
5596         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5597         tuples->ip_proto = fkeys->basic.ip_proto;
5598         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5599
5600         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5601                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5602                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5603         } else {
5604                 memcpy(tuples->src_ip,
5605                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5606                        sizeof(tuples->src_ip));
5607                 memcpy(tuples->dst_ip,
5608                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5609                        sizeof(tuples->dst_ip));
5610         }
5611 }
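/* Example (illustrative values, not from the original source): for an IPv4 TCP
 * flow 10.0.0.1:1024 -> 10.0.0.2:80, the function above fills
 * ether_proto = 0x0800 (ETH_P_IP), ip_proto = 6 (IPPROTO_TCP), dst_port = 80,
 * src_ip[3] = 0x0a000001 and dst_ip[3] = 0x0a000002, while src_ip[0..2] and
 * dst_ip[0..2] keep whatever the caller zeroed them to. Note that only the
 * destination port is extracted here; the source port is later marked as
 * unused in hclge_fd_build_arfs_rule().
 */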
5612
5613 /* traverse all rules, check whether an existing rule has the same tuples */
5614 static struct hclge_fd_rule *
5615 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5616                           const struct hclge_fd_rule_tuples *tuples)
5617 {
5618         struct hclge_fd_rule *rule = NULL;
5619         struct hlist_node *node;
5620
5621         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5622                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5623                         return rule;
5624         }
5625
5626         return NULL;
5627 }
5628
5629 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5630                                      struct hclge_fd_rule *rule)
5631 {
5632         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5633                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5634                              BIT(INNER_SRC_PORT);
5635         rule->action = 0;
5636         rule->vf_id = 0;
5637         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5638         if (tuples->ether_proto == ETH_P_IP) {
5639                 if (tuples->ip_proto == IPPROTO_TCP)
5640                         rule->flow_type = TCP_V4_FLOW;
5641                 else
5642                         rule->flow_type = UDP_V4_FLOW;
5643         } else {
5644                 if (tuples->ip_proto == IPPROTO_TCP)
5645                         rule->flow_type = TCP_V6_FLOW;
5646                 else
5647                         rule->flow_type = UDP_V6_FLOW;
5648         }
5649         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5650         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5651 }
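/* Example (illustrative summary): an aRFS rule built above matches exactly on
 * the tuples copied from the flow (tuples_mask is set to all ones), while
 * src/dst MAC, the first VLAN tag, IP ToS and the source port are declared
 * unused, so only the IP addresses, L4 destination port and protocol of the
 * received flow select the rule.
 */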
5652
5653 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5654                                       u16 flow_id, struct flow_keys *fkeys)
5655 {
5656         struct hclge_vport *vport = hclge_get_vport(handle);
5657         struct hclge_fd_rule_tuples new_tuples;
5658         struct hclge_dev *hdev = vport->back;
5659         struct hclge_fd_rule *rule;
5660         u16 tmp_queue_id;
5661         u16 bit_id;
5662         int ret;
5663
5664         if (!hnae3_dev_fd_supported(hdev))
5665                 return -EOPNOTSUPP;
5666
5667         memset(&new_tuples, 0, sizeof(new_tuples));
5668         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5669
5670         spin_lock_bh(&hdev->fd_rule_lock);
5671
5672         /* when there is already an fd rule added by the user,
5673          * arfs should not work
5674          */
5675         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5676                 spin_unlock_bh(&hdev->fd_rule_lock);
5677
5678                 return -EOPNOTSUPP;
5679         }
5680
5681         /* check whether a flow director filter exists for this flow,
5682          * if not, create a new filter for it;
5683          * if a filter exists with a different queue id, modify the filter;
5684          * if a filter exists with the same queue id, do nothing
5685          */
5686         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5687         if (!rule) {
5688                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5689                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5690                         spin_unlock_bh(&hdev->fd_rule_lock);
5691
5692                         return -ENOSPC;
5693                 }
5694
5695                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5696                 if (!rule) {
5697                         spin_unlock_bh(&hdev->fd_rule_lock);
5698
5699                         return -ENOMEM;
5700                 }
5701
5702                 set_bit(bit_id, hdev->fd_bmap);
5703                 rule->location = bit_id;
5704                 rule->flow_id = flow_id;
5705                 rule->queue_id = queue_id;
5706                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5707                 ret = hclge_fd_config_rule(hdev, rule);
5708
5709                 spin_unlock_bh(&hdev->fd_rule_lock);
5710
5711                 if (ret)
5712                         return ret;
5713
5714                 return rule->location;
5715         }
5716
5717         spin_unlock_bh(&hdev->fd_rule_lock);
5718
5719         if (rule->queue_id == queue_id)
5720                 return rule->location;
5721
5722         tmp_queue_id = rule->queue_id;
5723         rule->queue_id = queue_id;
5724         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5725         if (ret) {
5726                 rule->queue_id = tmp_queue_id;
5727                 return ret;
5728         }
5729
5730         return rule->location;
5731 }
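/* Usage note (summary, not part of the original source): on success the
 * function above returns the rule location, which the aRFS core later passes
 * back as the filter id to rps_may_expire_flow(); otherwise a negative errno
 * (-EOPNOTSUPP, -ENOSPC, -ENOMEM or a firmware command error) is returned.
 */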
5732
5733 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5734 {
5735 #ifdef CONFIG_RFS_ACCEL
5736         struct hnae3_handle *handle = &hdev->vport[0].nic;
5737         struct hclge_fd_rule *rule;
5738         struct hlist_node *node;
5739         HLIST_HEAD(del_list);
5740
5741         spin_lock_bh(&hdev->fd_rule_lock);
5742         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5743                 spin_unlock_bh(&hdev->fd_rule_lock);
5744                 return;
5745         }
5746         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5747                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5748                                         rule->flow_id, rule->location)) {
5749                         hlist_del_init(&rule->rule_node);
5750                         hlist_add_head(&rule->rule_node, &del_list);
5751                         hdev->hclge_fd_rule_num--;
5752                         clear_bit(rule->location, hdev->fd_bmap);
5753                 }
5754         }
5755         spin_unlock_bh(&hdev->fd_rule_lock);
5756
5757         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5758                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5759                                      rule->location, NULL, false);
5760                 kfree(rule);
5761         }
5762 #endif
5763 }
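/* Design note: expired rules are first unlinked under fd_rule_lock and
 * collected on a temporary list, and only then removed from the TCAM with
 * hclge_fd_tcam_config() outside the lock, so firmware commands are not
 * issued while the BH spinlock is held.
 */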
5764
5765 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5766 {
5767 #ifdef CONFIG_RFS_ACCEL
5768         struct hclge_vport *vport = hclge_get_vport(handle);
5769         struct hclge_dev *hdev = vport->back;
5770
5771         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5772                 hclge_del_all_fd_entries(handle, true);
5773 #endif
5774 }
5775
5776 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5777 {
5778         struct hclge_vport *vport = hclge_get_vport(handle);
5779         struct hclge_dev *hdev = vport->back;
5780
5781         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5782                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5783 }
5784
5785 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5786 {
5787         struct hclge_vport *vport = hclge_get_vport(handle);
5788         struct hclge_dev *hdev = vport->back;
5789
5790         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5791 }
5792
5793 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5794 {
5795         struct hclge_vport *vport = hclge_get_vport(handle);
5796         struct hclge_dev *hdev = vport->back;
5797
5798         return hdev->rst_stats.hw_reset_done_cnt;
5799 }
5800
5801 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5802 {
5803         struct hclge_vport *vport = hclge_get_vport(handle);
5804         struct hclge_dev *hdev = vport->back;
5805         bool clear;
5806
5807         hdev->fd_en = enable;
5808         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5809         if (!enable)
5810                 hclge_del_all_fd_entries(handle, clear);
5811         else
5812                 hclge_restore_fd_entries(handle);
5813 }
5814
5815 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5816 {
5817         struct hclge_desc desc;
5818         struct hclge_config_mac_mode_cmd *req =
5819                 (struct hclge_config_mac_mode_cmd *)desc.data;
5820         u32 loop_en = 0;
5821         int ret;
5822
5823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5824         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5825         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5826         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5827         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5828         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5829         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5830         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5831         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5832         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5833         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5834         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5835         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5836         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5837         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5838         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5839
5840         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5841         if (ret)
5842                 dev_err(&hdev->pdev->dev,
5843                         "mac enable fail, ret =%d.\n", ret);
5844 }
5845
5846 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5847 {
5848         struct hclge_config_mac_mode_cmd *req;
5849         struct hclge_desc desc;
5850         u32 loop_en;
5851         int ret;
5852
5853         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5854         /* 1 Read out the MAC mode config at first */
5855         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5856         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5857         if (ret) {
5858                 dev_err(&hdev->pdev->dev,
5859                         "mac loopback get fail, ret =%d.\n", ret);
5860                 return ret;
5861         }
5862
5863         /* 2 Then setup the loopback flag */
5864         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5868
5869         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5870
5871         /* 3 Config mac work mode with loopback flag
5872          * and its original configure parameters
5873          */
5874         hclge_cmd_reuse_desc(&desc, false);
5875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5876         if (ret)
5877                 dev_err(&hdev->pdev->dev,
5878                         "mac loopback set fail, ret =%d.\n", ret);
5879         return ret;
5880 }
5881
5882 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5883                                      enum hnae3_loop loop_mode)
5884 {
5885 #define HCLGE_SERDES_RETRY_MS   10
5886 #define HCLGE_SERDES_RETRY_NUM  100
5887
5888 #define HCLGE_MAC_LINK_STATUS_MS   10
5889 #define HCLGE_MAC_LINK_STATUS_NUM  100
5890 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5891 #define HCLGE_MAC_LINK_STATUS_UP   1
5892
5893         struct hclge_serdes_lb_cmd *req;
5894         struct hclge_desc desc;
5895         int mac_link_ret = 0;
5896         int ret, i = 0;
5897         u8 loop_mode_b;
5898
5899         req = (struct hclge_serdes_lb_cmd *)desc.data;
5900         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5901
5902         switch (loop_mode) {
5903         case HNAE3_LOOP_SERIAL_SERDES:
5904                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5905                 break;
5906         case HNAE3_LOOP_PARALLEL_SERDES:
5907                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5908                 break;
5909         default:
5910                 dev_err(&hdev->pdev->dev,
5911                         "unsupported serdes loopback mode %d\n", loop_mode);
5912                 return -ENOTSUPP;
5913         }
5914
5915         if (en) {
5916                 req->enable = loop_mode_b;
5917                 req->mask = loop_mode_b;
5918                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5919         } else {
5920                 req->mask = loop_mode_b;
5921                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5922         }
5923
5924         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5925         if (ret) {
5926                 dev_err(&hdev->pdev->dev,
5927                         "serdes loopback set fail, ret = %d\n", ret);
5928                 return ret;
5929         }
5930
5931         do {
5932                 msleep(HCLGE_SERDES_RETRY_MS);
5933                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5934                                            true);
5935                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5936                 if (ret) {
5937                         dev_err(&hdev->pdev->dev,
5938                                 "serdes loopback get, ret = %d\n", ret);
5939                         return ret;
5940                 }
5941         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5942                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5943
5944         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5945                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5946                 return -EBUSY;
5947         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5948                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5949                 return -EIO;
5950         }
5951
5952         hclge_cfg_mac_mode(hdev, en);
5953
5954         i = 0;
5955         do {
5956                 /* serdes internal loopback, independent of the network cable. */
5957                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5958                 ret = hclge_get_mac_link_status(hdev);
5959                 if (ret == mac_link_ret)
5960                         return 0;
5961         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5962
5963         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5964
5965         return -EBUSY;
5966 }
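/* Timing note: with the constants above, the function polls the firmware
 * completion flag for up to HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS
 * = 100 * 10 ms = 1 s, and then the MAC link state for up to another
 * 100 * 10 ms = 1 s, before giving up with -EBUSY.
 */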
5967
5968 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5969                             int stream_id, bool enable)
5970 {
5971         struct hclge_desc desc;
5972         struct hclge_cfg_com_tqp_queue_cmd *req =
5973                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5974         int ret;
5975
5976         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5977         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5978         req->stream_id = cpu_to_le16(stream_id);
5979         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5980
5981         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5982         if (ret)
5983                 dev_err(&hdev->pdev->dev,
5984                         "Tqp enable fail, status =%d.\n", ret);
5985         return ret;
5986 }
5987
5988 static int hclge_set_loopback(struct hnae3_handle *handle,
5989                               enum hnae3_loop loop_mode, bool en)
5990 {
5991         struct hclge_vport *vport = hclge_get_vport(handle);
5992         struct hnae3_knic_private_info *kinfo;
5993         struct hclge_dev *hdev = vport->back;
5994         int i, ret;
5995
5996         switch (loop_mode) {
5997         case HNAE3_LOOP_APP:
5998                 ret = hclge_set_app_loopback(hdev, en);
5999                 break;
6000         case HNAE3_LOOP_SERIAL_SERDES:
6001         case HNAE3_LOOP_PARALLEL_SERDES:
6002                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6003                 break;
6004         default:
6005                 ret = -ENOTSUPP;
6006                 dev_err(&hdev->pdev->dev,
6007                         "loop_mode %d is not supported\n", loop_mode);
6008                 break;
6009         }
6010
6011         if (ret)
6012                 return ret;
6013
6014         kinfo = &vport->nic.kinfo;
6015         for (i = 0; i < kinfo->num_tqps; i++) {
6016                 ret = hclge_tqp_enable(hdev, i, 0, en);
6017                 if (ret)
6018                         return ret;
6019         }
6020
6021         return 0;
6022 }
6023
6024 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6025 {
6026         struct hclge_vport *vport = hclge_get_vport(handle);
6027         struct hnae3_knic_private_info *kinfo;
6028         struct hnae3_queue *queue;
6029         struct hclge_tqp *tqp;
6030         int i;
6031
6032         kinfo = &vport->nic.kinfo;
6033         for (i = 0; i < kinfo->num_tqps; i++) {
6034                 queue = handle->kinfo.tqp[i];
6035                 tqp = container_of(queue, struct hclge_tqp, q);
6036                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6037         }
6038 }
6039
6040 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6041 {
6042         struct hclge_vport *vport = hclge_get_vport(handle);
6043         struct hclge_dev *hdev = vport->back;
6044
6045         if (enable) {
6046                 mod_timer(&hdev->service_timer, jiffies + HZ);
6047         } else {
6048                 del_timer_sync(&hdev->service_timer);
6049                 cancel_work_sync(&hdev->service_task);
6050                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6051         }
6052 }
6053
6054 static int hclge_ae_start(struct hnae3_handle *handle)
6055 {
6056         struct hclge_vport *vport = hclge_get_vport(handle);
6057         struct hclge_dev *hdev = vport->back;
6058
6059         /* mac enable */
6060         hclge_cfg_mac_mode(hdev, true);
6061         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6062         hdev->hw.mac.link = 0;
6063
6064         /* reset tqp stats */
6065         hclge_reset_tqp_stats(handle);
6066
6067         hclge_mac_start_phy(hdev);
6068
6069         return 0;
6070 }
6071
6072 static void hclge_ae_stop(struct hnae3_handle *handle)
6073 {
6074         struct hclge_vport *vport = hclge_get_vport(handle);
6075         struct hclge_dev *hdev = vport->back;
6076         int i;
6077
6078         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6079
6080         hclge_clear_arfs_rules(handle);
6081
6082         /* If it is not a PF reset, the firmware will disable the MAC,
6083          * so it only needs to stop the phy here.
6084          */
6085         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6086             hdev->reset_type != HNAE3_FUNC_RESET) {
6087                 hclge_mac_stop_phy(hdev);
6088                 return;
6089         }
6090
6091         for (i = 0; i < handle->kinfo.num_tqps; i++)
6092                 hclge_reset_tqp(handle, i);
6093
6094         /* Mac disable */
6095         hclge_cfg_mac_mode(hdev, false);
6096
6097         hclge_mac_stop_phy(hdev);
6098
6099         /* reset tqp stats */
6100         hclge_reset_tqp_stats(handle);
6101         hclge_update_link_status(hdev);
6102 }
6103
6104 int hclge_vport_start(struct hclge_vport *vport)
6105 {
6106         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6107         vport->last_active_jiffies = jiffies;
6108         return 0;
6109 }
6110
6111 void hclge_vport_stop(struct hclge_vport *vport)
6112 {
6113         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6114 }
6115
6116 static int hclge_client_start(struct hnae3_handle *handle)
6117 {
6118         struct hclge_vport *vport = hclge_get_vport(handle);
6119
6120         return hclge_vport_start(vport);
6121 }
6122
6123 static void hclge_client_stop(struct hnae3_handle *handle)
6124 {
6125         struct hclge_vport *vport = hclge_get_vport(handle);
6126
6127         hclge_vport_stop(vport);
6128 }
6129
6130 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6131                                          u16 cmdq_resp, u8  resp_code,
6132                                          enum hclge_mac_vlan_tbl_opcode op)
6133 {
6134         struct hclge_dev *hdev = vport->back;
6135         int return_status = -EIO;
6136
6137         if (cmdq_resp) {
6138                 dev_err(&hdev->pdev->dev,
6139                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6140                         cmdq_resp);
6141                 return -EIO;
6142         }
6143
6144         if (op == HCLGE_MAC_VLAN_ADD) {
6145                 if ((!resp_code) || (resp_code == 1)) {
6146                         return_status = 0;
6147                 } else if (resp_code == 2) {
6148                         return_status = -ENOSPC;
6149                         dev_err(&hdev->pdev->dev,
6150                                 "add mac addr failed for uc_overflow.\n");
6151                 } else if (resp_code == 3) {
6152                         return_status = -ENOSPC;
6153                         dev_err(&hdev->pdev->dev,
6154                                 "add mac addr failed for mc_overflow.\n");
6155                 } else {
6156                         dev_err(&hdev->pdev->dev,
6157                                 "add mac addr failed for undefined, code=%d.\n",
6158                                 resp_code);
6159                 }
6160         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6161                 if (!resp_code) {
6162                         return_status = 0;
6163                 } else if (resp_code == 1) {
6164                         return_status = -ENOENT;
6165                         dev_dbg(&hdev->pdev->dev,
6166                                 "remove mac addr failed for miss.\n");
6167                 } else {
6168                         dev_err(&hdev->pdev->dev,
6169                                 "remove mac addr failed for undefined, code=%d.\n",
6170                                 resp_code);
6171                 }
6172         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6173                 if (!resp_code) {
6174                         return_status = 0;
6175                 } else if (resp_code == 1) {
6176                         return_status = -ENOENT;
6177                         dev_dbg(&hdev->pdev->dev,
6178                                 "lookup mac addr failed for miss.\n");
6179                 } else {
6180                         dev_err(&hdev->pdev->dev,
6181                                 "lookup mac addr failed for undefined, code=%d.\n",
6182                                 resp_code);
6183                 }
6184         } else {
6185                 return_status = -EINVAL;
6186                 dev_err(&hdev->pdev->dev,
6187                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6188                         op);
6189         }
6190
6191         return return_status;
6192 }
6193
6194 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6195 {
6196         int word_num;
6197         int bit_num;
6198
6199         if (vfid > 255 || vfid < 0)
6200                 return -EIO;
6201
6202         if (vfid >= 0 && vfid <= 191) {
6203                 word_num = vfid / 32;
6204                 bit_num  = vfid % 32;
6205                 if (clr)
6206                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6207                 else
6208                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6209         } else {
6210                 word_num = (vfid - 192) / 32;
6211                 bit_num  = vfid % 32;
6212                 if (clr)
6213                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6214                 else
6215                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6216         }
6217
6218         return 0;
6219 }
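/* Example (illustrative values): vfid 50 lands in desc[1].data[1] bit 18
 * (50 / 32 = 1, 50 % 32 = 18), while vfid 200 lands in desc[2].data[0] bit 8
 * ((200 - 192) / 32 = 0, 200 % 32 = 8); vfids 0-191 therefore use desc[1]
 * and vfids 192-255 use desc[2].
 */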
6220
6221 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6222 {
6223 #define HCLGE_DESC_NUMBER 3
6224 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6225         int i, j;
6226
6227         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6228                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6229                         if (desc[i].data[j])
6230                                 return false;
6231
6232         return true;
6233 }
6234
6235 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6236                                    const u8 *addr, bool is_mc)
6237 {
6238         const unsigned char *mac_addr = addr;
6239         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6240                        (mac_addr[0]) | (mac_addr[1] << 8);
6241         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6242
6243         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6244         if (is_mc) {
6245                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6246                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6247         }
6248
6249         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6250         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6251 }
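/* Example (illustrative value): for the address 00:11:22:33:44:55 the packing
 * above yields high_val = 0x33221100 and low_val = 0x5544, i.e. bytes 0-3 of
 * the MAC in mac_addr_hi32 and bytes 4-5 in mac_addr_lo16, both stored
 * little-endian.
 */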
6252
6253 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6254                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6255 {
6256         struct hclge_dev *hdev = vport->back;
6257         struct hclge_desc desc;
6258         u8 resp_code;
6259         u16 retval;
6260         int ret;
6261
6262         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6263
6264         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6265
6266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6267         if (ret) {
6268                 dev_err(&hdev->pdev->dev,
6269                         "del mac addr failed for cmd_send, ret =%d.\n",
6270                         ret);
6271                 return ret;
6272         }
6273         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6274         retval = le16_to_cpu(desc.retval);
6275
6276         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6277                                              HCLGE_MAC_VLAN_REMOVE);
6278 }
6279
6280 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6281                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6282                                      struct hclge_desc *desc,
6283                                      bool is_mc)
6284 {
6285         struct hclge_dev *hdev = vport->back;
6286         u8 resp_code;
6287         u16 retval;
6288         int ret;
6289
6290         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6291         if (is_mc) {
6292                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6293                 memcpy(desc[0].data,
6294                        req,
6295                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6296                 hclge_cmd_setup_basic_desc(&desc[1],
6297                                            HCLGE_OPC_MAC_VLAN_ADD,
6298                                            true);
6299                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6300                 hclge_cmd_setup_basic_desc(&desc[2],
6301                                            HCLGE_OPC_MAC_VLAN_ADD,
6302                                            true);
6303                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6304         } else {
6305                 memcpy(desc[0].data,
6306                        req,
6307                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6308                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6309         }
6310         if (ret) {
6311                 dev_err(&hdev->pdev->dev,
6312                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6313                         ret);
6314                 return ret;
6315         }
6316         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6317         retval = le16_to_cpu(desc[0].retval);
6318
6319         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6320                                              HCLGE_MAC_VLAN_LKUP);
6321 }
6322
6323 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6324                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6325                                   struct hclge_desc *mc_desc)
6326 {
6327         struct hclge_dev *hdev = vport->back;
6328         int cfg_status;
6329         u8 resp_code;
6330         u16 retval;
6331         int ret;
6332
6333         if (!mc_desc) {
6334                 struct hclge_desc desc;
6335
6336                 hclge_cmd_setup_basic_desc(&desc,
6337                                            HCLGE_OPC_MAC_VLAN_ADD,
6338                                            false);
6339                 memcpy(desc.data, req,
6340                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6341                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6342                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6343                 retval = le16_to_cpu(desc.retval);
6344
6345                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6346                                                            resp_code,
6347                                                            HCLGE_MAC_VLAN_ADD);
6348         } else {
6349                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6350                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6351                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6352                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6353                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6354                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6355                 memcpy(mc_desc[0].data, req,
6356                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6357                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6358                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6359                 retval = le16_to_cpu(mc_desc[0].retval);
6360
6361                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6362                                                            resp_code,
6363                                                            HCLGE_MAC_VLAN_ADD);
6364         }
6365
6366         if (ret) {
6367                 dev_err(&hdev->pdev->dev,
6368                         "add mac addr failed for cmd_send, ret =%d.\n",
6369                         ret);
6370                 return ret;
6371         }
6372
6373         return cfg_status;
6374 }
6375
6376 static int hclge_init_umv_space(struct hclge_dev *hdev)
6377 {
6378         u16 allocated_size = 0;
6379         int ret;
6380
6381         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6382                                   true);
6383         if (ret)
6384                 return ret;
6385
6386         if (allocated_size < hdev->wanted_umv_size)
6387                 dev_warn(&hdev->pdev->dev,
6388                          "Alloc umv space failed, want %d, get %d\n",
6389                          hdev->wanted_umv_size, allocated_size);
6390
6391         mutex_init(&hdev->umv_mutex);
6392         hdev->max_umv_size = allocated_size;
6393         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6394         hdev->share_umv_size = hdev->priv_umv_size +
6395                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6396
6397         return 0;
6398 }
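/* Example (illustrative values): with allocated_size = 510 and
 * num_req_vfs = 6, the divisor is 8, so priv_umv_size = 63 entries for each
 * function (PF + 6 VFs) and share_umv_size = 63 + 510 % 8 = 69 entries; the
 * shared pool is one extra private-sized slice plus the remainder.
 */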
6399
6400 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6401 {
6402         int ret;
6403
6404         if (hdev->max_umv_size > 0) {
6405                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6406                                           false);
6407                 if (ret)
6408                         return ret;
6409                 hdev->max_umv_size = 0;
6410         }
6411         mutex_destroy(&hdev->umv_mutex);
6412
6413         return 0;
6414 }
6415
6416 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6417                                u16 *allocated_size, bool is_alloc)
6418 {
6419         struct hclge_umv_spc_alc_cmd *req;
6420         struct hclge_desc desc;
6421         int ret;
6422
6423         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6424         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6425         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6426         req->space_size = cpu_to_le32(space_size);
6427
6428         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6429         if (ret) {
6430                 dev_err(&hdev->pdev->dev,
6431                         "%s umv space failed for cmd_send, ret =%d\n",
6432                         is_alloc ? "allocate" : "free", ret);
6433                 return ret;
6434         }
6435
6436         if (is_alloc && allocated_size)
6437                 *allocated_size = le32_to_cpu(desc.data[1]);
6438
6439         return 0;
6440 }
6441
6442 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6443 {
6444         struct hclge_vport *vport;
6445         int i;
6446
6447         for (i = 0; i < hdev->num_alloc_vport; i++) {
6448                 vport = &hdev->vport[i];
6449                 vport->used_umv_num = 0;
6450         }
6451
6452         mutex_lock(&hdev->umv_mutex);
6453         hdev->share_umv_size = hdev->priv_umv_size +
6454                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6455         mutex_unlock(&hdev->umv_mutex);
6456 }
6457
6458 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6459 {
6460         struct hclge_dev *hdev = vport->back;
6461         bool is_full;
6462
6463         mutex_lock(&hdev->umv_mutex);
6464         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6465                    hdev->share_umv_size == 0);
6466         mutex_unlock(&hdev->umv_mutex);
6467
6468         return is_full;
6469 }
6470
6471 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6472 {
6473         struct hclge_dev *hdev = vport->back;
6474
6475         mutex_lock(&hdev->umv_mutex);
6476         if (is_free) {
6477                 if (vport->used_umv_num > hdev->priv_umv_size)
6478                         hdev->share_umv_size++;
6479
6480                 if (vport->used_umv_num > 0)
6481                         vport->used_umv_num--;
6482         } else {
6483                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6484                     hdev->share_umv_size > 0)
6485                         hdev->share_umv_size--;
6486                 vport->used_umv_num++;
6487         }
6488         mutex_unlock(&hdev->umv_mutex);
6489 }
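/* Accounting note: a vport consumes its private quota (priv_umv_size) first
 * and only then dips into the shared pool, which is why the allocation path
 * above decrements share_umv_size once used_umv_num has reached
 * priv_umv_size, and the free path returns the slot to the shared pool while
 * used_umv_num is still above the private quota.
 */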
6490
6491 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6492                              const unsigned char *addr)
6493 {
6494         struct hclge_vport *vport = hclge_get_vport(handle);
6495
6496         return hclge_add_uc_addr_common(vport, addr);
6497 }
6498
6499 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6500                              const unsigned char *addr)
6501 {
6502         struct hclge_dev *hdev = vport->back;
6503         struct hclge_mac_vlan_tbl_entry_cmd req;
6504         struct hclge_desc desc;
6505         u16 egress_port = 0;
6506         int ret;
6507
6508         /* mac addr check */
6509         if (is_zero_ether_addr(addr) ||
6510             is_broadcast_ether_addr(addr) ||
6511             is_multicast_ether_addr(addr)) {
6512                 dev_err(&hdev->pdev->dev,
6513                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6514                          addr,
6515                          is_zero_ether_addr(addr),
6516                          is_broadcast_ether_addr(addr),
6517                          is_multicast_ether_addr(addr));
6518                 return -EINVAL;
6519         }
6520
6521         memset(&req, 0, sizeof(req));
6522
6523         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6524                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6525
6526         req.egress_port = cpu_to_le16(egress_port);
6527
6528         hclge_prepare_mac_addr(&req, addr, false);
6529
6530         /* Lookup the mac address in the mac_vlan table, and add
6531          * it if the entry does not exist. Repeated unicast entries
6532          * are not allowed in the mac vlan table.
6533          */
6534         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6535         if (ret == -ENOENT) {
6536                 if (!hclge_is_umv_space_full(vport)) {
6537                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6538                         if (!ret)
6539                                 hclge_update_umv_space(vport, false);
6540                         return ret;
6541                 }
6542
6543                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6544                         hdev->priv_umv_size);
6545
6546                 return -ENOSPC;
6547         }
6548
6549         /* check if we just hit the duplicate */
6550         if (!ret) {
6551                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6552                          vport->vport_id, addr);
6553                 return 0;
6554         }
6555
6556         dev_err(&hdev->pdev->dev,
6557                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6558                 addr);
6559
6560         return ret;
6561 }
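/* Flow summary: a -ENOENT from the lookup means the address is new and is
 * added (consuming UMV space) if the table is not full; a return of 0 means
 * the unicast entry already exists and is treated as success after a warning;
 * any other lookup error is reported and propagated.
 */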
6562
6563 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6564                             const unsigned char *addr)
6565 {
6566         struct hclge_vport *vport = hclge_get_vport(handle);
6567
6568         return hclge_rm_uc_addr_common(vport, addr);
6569 }
6570
6571 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6572                             const unsigned char *addr)
6573 {
6574         struct hclge_dev *hdev = vport->back;
6575         struct hclge_mac_vlan_tbl_entry_cmd req;
6576         int ret;
6577
6578         /* mac addr check */
6579         if (is_zero_ether_addr(addr) ||
6580             is_broadcast_ether_addr(addr) ||
6581             is_multicast_ether_addr(addr)) {
6582                 dev_dbg(&hdev->pdev->dev,
6583                         "Remove mac err! invalid mac:%pM.\n",
6584                          addr);
6585                 return -EINVAL;
6586         }
6587
6588         memset(&req, 0, sizeof(req));
6589         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6590         hclge_prepare_mac_addr(&req, addr, false);
6591         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6592         if (!ret)
6593                 hclge_update_umv_space(vport, true);
6594
6595         return ret;
6596 }
6597
6598 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6599                              const unsigned char *addr)
6600 {
6601         struct hclge_vport *vport = hclge_get_vport(handle);
6602
6603         return hclge_add_mc_addr_common(vport, addr);
6604 }
6605
6606 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6607                              const unsigned char *addr)
6608 {
6609         struct hclge_dev *hdev = vport->back;
6610         struct hclge_mac_vlan_tbl_entry_cmd req;
6611         struct hclge_desc desc[3];
6612         int status;
6613
6614         /* mac addr check */
6615         if (!is_multicast_ether_addr(addr)) {
6616                 dev_err(&hdev->pdev->dev,
6617                         "Add mc mac err! invalid mac:%pM.\n",
6618                          addr);
6619                 return -EINVAL;
6620         }
6621         memset(&req, 0, sizeof(req));
6622         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6623         hclge_prepare_mac_addr(&req, addr, true);
6624         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6625         if (!status) {
6626                 /* This mac addr exists, update the VFID for it */
6627                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6628                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6629         } else {
6630                 /* This mac addr does not exist, add a new entry for it */
6631                 memset(desc[0].data, 0, sizeof(desc[0].data));
6632                 memset(desc[1].data, 0, sizeof(desc[0].data));
6633                 memset(desc[2].data, 0, sizeof(desc[0].data));
6634                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6635                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6636         }
6637
6638         if (status == -ENOSPC)
6639                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6640
6641         return status;
6642 }
6643
6644 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6645                             const unsigned char *addr)
6646 {
6647         struct hclge_vport *vport = hclge_get_vport(handle);
6648
6649         return hclge_rm_mc_addr_common(vport, addr);
6650 }
6651
6652 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6653                             const unsigned char *addr)
6654 {
6655         struct hclge_dev *hdev = vport->back;
6656         struct hclge_mac_vlan_tbl_entry_cmd req;
6657         enum hclge_cmd_status status;
6658         struct hclge_desc desc[3];
6659
6660         /* mac addr check */
6661         if (!is_multicast_ether_addr(addr)) {
6662                 dev_dbg(&hdev->pdev->dev,
6663                         "Remove mc mac err! invalid mac:%pM.\n",
6664                          addr);
6665                 return -EINVAL;
6666         }
6667
6668         memset(&req, 0, sizeof(req));
6669         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6670         hclge_prepare_mac_addr(&req, addr, true);
6671         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6672         if (!status) {
6673                 /* This mac addr exists, remove this handle's VFID for it */
6674                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6675
6676                 if (hclge_is_all_function_id_zero(desc))
6677                         /* All the vfids are zero, so we need to delete this entry */
6678                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6679                 else
6680                         /* Not all the vfids are zero, update the vfid */
6681                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6682
6683         } else {
6684                 /* Maybe this mac address is in the mta table, but it cannot be
6685                  * deleted here because an mta entry represents an address
6686                  * range rather than a specific address. The delete action on
6687                  * all entries will take effect in update_mta_status, called by
6688                  * hns3_nic_set_rx_mode.
6689                  */
6690                 status = 0;
6691         }
6692
6693         return status;
6694 }
6695
6696 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6697                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6698 {
6699         struct hclge_vport_mac_addr_cfg *mac_cfg;
6700         struct list_head *list;
6701
6702         if (!vport->vport_id)
6703                 return;
6704
6705         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6706         if (!mac_cfg)
6707                 return;
6708
6709         mac_cfg->hd_tbl_status = true;
6710         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6711
6712         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6713                &vport->uc_mac_list : &vport->mc_mac_list;
6714
6715         list_add_tail(&mac_cfg->node, list);
6716 }
6717
6718 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6719                               bool is_write_tbl,
6720                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6721 {
6722         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6723         struct list_head *list;
6724         bool uc_flag, mc_flag;
6725
6726         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6727                &vport->uc_mac_list : &vport->mc_mac_list;
6728
6729         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6730         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6731
6732         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6733                 if (!memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN)) {
6734                         if (uc_flag && mac_cfg->hd_tbl_status)
6735                                 hclge_rm_uc_addr_common(vport, mac_addr);
6736
6737                         if (mc_flag && mac_cfg->hd_tbl_status)
6738                                 hclge_rm_mc_addr_common(vport, mac_addr);
6739
6740                         list_del(&mac_cfg->node);
6741                         kfree(mac_cfg);
6742                         break;
6743                 }
6744         }
6745 }
6746
6747 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6748                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6749 {
6750         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6751         struct list_head *list;
6752
6753         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6754                &vport->uc_mac_list : &vport->mc_mac_list;
6755
6756         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6757                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6758                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6759
6760                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6761                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6762
6763                 mac_cfg->hd_tbl_status = false;
6764                 if (is_del_list) {
6765                         list_del(&mac_cfg->node);
6766                         kfree(mac_cfg);
6767                 }
6768         }
6769 }
6770
6771 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6772 {
6773         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6774         struct hclge_vport *vport;
6775         int i;
6776
6777         mutex_lock(&hdev->vport_cfg_mutex);
6778         for (i = 0; i < hdev->num_alloc_vport; i++) {
6779                 vport = &hdev->vport[i];
6780                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6781                         list_del(&mac->node);
6782                         kfree(mac);
6783                 }
6784
6785                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6786                         list_del(&mac->node);
6787                         kfree(mac);
6788                 }
6789         }
6790         mutex_unlock(&hdev->vport_cfg_mutex);
6791 }
6792
6793 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6794                                               u16 cmdq_resp, u8 resp_code)
6795 {
6796 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6797 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6798 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6799 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6800
6801         int return_status;
6802
6803         if (cmdq_resp) {
6804                 dev_err(&hdev->pdev->dev,
6805                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6806                         cmdq_resp);
6807                 return -EIO;
6808         }
6809
6810         switch (resp_code) {
6811         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6812         case HCLGE_ETHERTYPE_ALREADY_ADD:
6813                 return_status = 0;
6814                 break;
6815         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6816                 dev_err(&hdev->pdev->dev,
6817                         "add mac ethertype failed for manager table overflow.\n");
6818                 return_status = -EIO;
6819                 break;
6820         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6821                 dev_err(&hdev->pdev->dev,
6822                         "add mac ethertype failed for key conflict.\n");
6823                 return_status = -EIO;
6824                 break;
6825         default:
6826                 dev_err(&hdev->pdev->dev,
6827                         "add mac ethertype failed for undefined, code=%d.\n",
6828                         resp_code);
6829                 return_status = -EIO;
6830         }
6831
6832         return return_status;
6833 }
6834
6835 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6836                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6837 {
6838         struct hclge_desc desc;
6839         u8 resp_code;
6840         u16 retval;
6841         int ret;
6842
6843         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6844         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6845
6846         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6847         if (ret) {
6848                 dev_err(&hdev->pdev->dev,
6849                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6850                         ret);
6851                 return ret;
6852         }
6853
6854         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6855         retval = le16_to_cpu(desc.retval);
6856
6857         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6858 }
6859
6860 static int init_mgr_tbl(struct hclge_dev *hdev)
6861 {
6862         int ret;
6863         int i;
6864
6865         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6866                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6867                 if (ret) {
6868                         dev_err(&hdev->pdev->dev,
6869                                 "add mac ethertype failed, ret =%d.\n",
6870                                 ret);
6871                         return ret;
6872                 }
6873         }
6874
6875         return 0;
6876 }
6877
6878 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6879 {
6880         struct hclge_vport *vport = hclge_get_vport(handle);
6881         struct hclge_dev *hdev = vport->back;
6882
6883         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6884 }
6885
6886 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6887                               bool is_first)
6888 {
6889         const unsigned char *new_addr = (const unsigned char *)p;
6890         struct hclge_vport *vport = hclge_get_vport(handle);
6891         struct hclge_dev *hdev = vport->back;
6892         int ret;
6893
6894         /* mac addr check */
6895         if (is_zero_ether_addr(new_addr) ||
6896             is_broadcast_ether_addr(new_addr) ||
6897             is_multicast_ether_addr(new_addr)) {
6898                 dev_err(&hdev->pdev->dev,
6899                         "Change uc mac err! invalid mac:%pM.\n",
6900                          new_addr);
6901                 return -EINVAL;
6902         }
6903
6904         if ((!is_first || is_kdump_kernel()) &&
6905             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6906                 dev_warn(&hdev->pdev->dev,
6907                          "remove old uc mac address fail.\n");
6908
6909         ret = hclge_add_uc_addr(handle, new_addr);
6910         if (ret) {
6911                 dev_err(&hdev->pdev->dev,
6912                         "add uc mac address fail, ret =%d.\n",
6913                         ret);
6914
6915                 if (!is_first &&
6916                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6917                         dev_err(&hdev->pdev->dev,
6918                                 "restore uc mac address fail.\n");
6919
6920                 return -EIO;
6921         }
6922
6923         ret = hclge_pause_addr_cfg(hdev, new_addr);
6924         if (ret) {
6925                 dev_err(&hdev->pdev->dev,
6926                         "configure mac pause address fail, ret =%d.\n",
6927                         ret);
6928                 return -EIO;
6929         }
6930
6931         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6932
6933         return 0;
6934 }
6935
6936 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6937                           int cmd)
6938 {
6939         struct hclge_vport *vport = hclge_get_vport(handle);
6940         struct hclge_dev *hdev = vport->back;
6941
6942         if (!hdev->hw.mac.phydev)
6943                 return -EOPNOTSUPP;
6944
6945         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6946 }
6947
6948 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6949                                       u8 fe_type, bool filter_en, u8 vf_id)
6950 {
6951         struct hclge_vlan_filter_ctrl_cmd *req;
6952         struct hclge_desc desc;
6953         int ret;
6954
6955         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6956
6957         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6958         req->vlan_type = vlan_type;
6959         req->vlan_fe = filter_en ? fe_type : 0;
6960         req->vf_id = vf_id;
6961
6962         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6963         if (ret)
6964                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6965                         ret);
6966
6967         return ret;
6968 }
6969
6970 #define HCLGE_FILTER_TYPE_VF            0
6971 #define HCLGE_FILTER_TYPE_PORT          1
6972 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6973 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6974 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6975 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6976 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6977 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6978                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6979 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6980                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6981
6982 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6983 {
6984         struct hclge_vport *vport = hclge_get_vport(handle);
6985         struct hclge_dev *hdev = vport->back;
6986
6987         if (hdev->pdev->revision >= 0x21) {
6988                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6989                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
6990                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6991                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
6992         } else {
6993                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6994                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6995                                            0);
6996         }
6997         if (enable)
6998                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6999         else
7000                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7001 }
7002
7003 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7004                                     bool is_kill, u16 vlan, u8 qos,
7005                                     __be16 proto)
7006 {
7007 #define HCLGE_MAX_VF_BYTES  16
7008         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7009         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7010         struct hclge_desc desc[2];
7011         u8 vf_byte_val;
7012         u8 vf_byte_off;
7013         int ret;
7014
7015         /* if the vf vlan table is full, the firmware will disable the vf vlan
7016          * filter; it is then neither possible nor necessary to add new vlan ids
7017          */
7018         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7019                 return 0;
7020
7021         hclge_cmd_setup_basic_desc(&desc[0],
7022                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7023         hclge_cmd_setup_basic_desc(&desc[1],
7024                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7025
7026         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7027
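        /* the vf bitmap spans two descriptors: desc[0] carries the first
         * HCLGE_MAX_VF_BYTES bytes and desc[1] the rest, one bit per vf
         */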
7028         vf_byte_off = vfid / 8;
7029         vf_byte_val = 1 << (vfid % 8);
7030
7031         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7032         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7033
7034         req0->vlan_id  = cpu_to_le16(vlan);
7035         req0->vlan_cfg = is_kill;
7036
7037         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7038                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7039         else
7040                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7041
7042         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7043         if (ret) {
7044                 dev_err(&hdev->pdev->dev,
7045                         "Send vf vlan command fail, ret =%d.\n",
7046                         ret);
7047                 return ret;
7048         }
7049
7050         if (!is_kill) {
7051 #define HCLGE_VF_VLAN_NO_ENTRY  2
7052                 if (!req0->resp_code || req0->resp_code == 1)
7053                         return 0;
7054
7055                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7056                         set_bit(vfid, hdev->vf_vlan_full);
7057                         dev_warn(&hdev->pdev->dev,
7058                                  "vf vlan table is full, vf vlan filter is disabled\n");
7059                         return 0;
7060                 }
7061
7062                 dev_err(&hdev->pdev->dev,
7063                         "Add vf vlan filter fail, ret =%d.\n",
7064                         req0->resp_code);
7065         } else {
7066 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7067                 if (!req0->resp_code)
7068                         return 0;
7069
7070                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7071                         dev_warn(&hdev->pdev->dev,
7072                                  "vlan %d filter is not in vf vlan table\n",
7073                                  vlan);
7074                         return 0;
7075                 }
7076
7077                 dev_err(&hdev->pdev->dev,
7078                         "Kill vf vlan filter fail, ret =%d.\n",
7079                         req0->resp_code);
7080         }
7081
7082         return -EIO;
7083 }
7084
7085 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7086                                       u16 vlan_id, bool is_kill)
7087 {
7088         struct hclge_vlan_filter_pf_cfg_cmd *req;
7089         struct hclge_desc desc;
7090         u8 vlan_offset_byte_val;
7091         u8 vlan_offset_byte;
7092         u8 vlan_offset_160;
7093         int ret;
7094
7095         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7096
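        /* the PF vlan filter table is written 160 vlan ids at a time:
         * vlan_offset selects the 160-vlan block and vlan_offset_bitmap
         * carries one bit per vlan id within that block
         */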
7097         vlan_offset_160 = vlan_id / 160;
7098         vlan_offset_byte = (vlan_id % 160) / 8;
7099         vlan_offset_byte_val = 1 << (vlan_id % 8);
7100
7101         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7102         req->vlan_offset = vlan_offset_160;
7103         req->vlan_cfg = is_kill;
7104         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7105
7106         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7107         if (ret)
7108                 dev_err(&hdev->pdev->dev,
7109                         "port vlan command, send fail, ret =%d.\n", ret);
7110         return ret;
7111 }
7112
7113 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7114                                     u16 vport_id, u16 vlan_id, u8 qos,
7115                                     bool is_kill)
7116 {
7117         u16 vport_idx, vport_num = 0;
7118         int ret;
7119
7120         if (is_kill && !vlan_id)
7121                 return 0;
7122
7123         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7124                                        0, proto);
7125         if (ret) {
7126                 dev_err(&hdev->pdev->dev,
7127                         "Set %d vport vlan filter config fail, ret =%d.\n",
7128                         vport_id, ret);
7129                 return ret;
7130         }
7131
7132         /* vlan 0 may be added twice when 8021q module is enabled */
7133         if (!is_kill && !vlan_id &&
7134             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7135                 return 0;
7136
7137         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7138                 dev_err(&hdev->pdev->dev,
7139                         "Add port vlan failed, vport %d is already in vlan %d\n",
7140                         vport_id, vlan_id);
7141                 return -EINVAL;
7142         }
7143
7144         if (is_kill &&
7145             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7146                 dev_err(&hdev->pdev->dev,
7147                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7148                         vport_id, vlan_id);
7149                 return -EINVAL;
7150         }
7151
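        /* the port (PF) vlan filter only needs to be updated when the first
         * vport joins this vlan or the last vport leaves it
         */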
7152         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7153                 vport_num++;
7154
7155         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7156                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7157                                                  is_kill);
7158
7159         return ret;
7160 }
7161
7162 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7163 {
7164         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7165         struct hclge_vport_vtag_tx_cfg_cmd *req;
7166         struct hclge_dev *hdev = vport->back;
7167         struct hclge_desc desc;
7168         int status;
7169
7170         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7171
7172         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7173         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7174         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7175         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7176                       vcfg->accept_tag1 ? 1 : 0);
7177         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7178                       vcfg->accept_untag1 ? 1 : 0);
7179         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7180                       vcfg->accept_tag2 ? 1 : 0);
7181         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7182                       vcfg->accept_untag2 ? 1 : 0);
7183         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7184                       vcfg->insert_tag1_en ? 1 : 0);
7185         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7186                       vcfg->insert_tag2_en ? 1 : 0);
7187         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7188
7189         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7190         req->vf_bitmap[req->vf_offset] =
7191                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7192
7193         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7194         if (status)
7195                 dev_err(&hdev->pdev->dev,
7196                         "Send port txvlan cfg command fail, ret =%d\n",
7197                         status);
7198
7199         return status;
7200 }
7201
7202 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7203 {
7204         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7205         struct hclge_vport_vtag_rx_cfg_cmd *req;
7206         struct hclge_dev *hdev = vport->back;
7207         struct hclge_desc desc;
7208         int status;
7209
7210         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7211
7212         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7213         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7214                       vcfg->strip_tag1_en ? 1 : 0);
7215         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7216                       vcfg->strip_tag2_en ? 1 : 0);
7217         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7218                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7219         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7220                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7221
7222         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7223         req->vf_bitmap[req->vf_offset] =
7224                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7225
7226         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7227         if (status)
7228                 dev_err(&hdev->pdev->dev,
7229                         "Send port rxvlan cfg command fail, ret =%d\n",
7230                         status);
7231
7232         return status;
7233 }
7234
7235 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7236                                   u16 port_base_vlan_state,
7237                                   u16 vlan_tag)
7238 {
7239         int ret;
7240
7241         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7242                 vport->txvlan_cfg.accept_tag1 = true;
7243                 vport->txvlan_cfg.insert_tag1_en = false;
7244                 vport->txvlan_cfg.default_tag1 = 0;
7245         } else {
7246                 vport->txvlan_cfg.accept_tag1 = false;
7247                 vport->txvlan_cfg.insert_tag1_en = true;
7248                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7249         }
7250
7251         vport->txvlan_cfg.accept_untag1 = true;
7252
7253         /* accept_tag2 and accept_untag2 are not supported on
7254          * pdev revision(0x20); newer revisions support them, but
7255          * these two fields cannot be configured by the user.
7256          */
7257         vport->txvlan_cfg.accept_tag2 = true;
7258         vport->txvlan_cfg.accept_untag2 = true;
7259         vport->txvlan_cfg.insert_tag2_en = false;
7260         vport->txvlan_cfg.default_tag2 = 0;
7261
7262         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7263                 vport->rxvlan_cfg.strip_tag1_en = false;
7264                 vport->rxvlan_cfg.strip_tag2_en =
7265                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7266         } else {
7267                 vport->rxvlan_cfg.strip_tag1_en =
7268                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7269                 vport->rxvlan_cfg.strip_tag2_en = true;
7270         }
7271         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7272         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7273
7274         ret = hclge_set_vlan_tx_offload_cfg(vport);
7275         if (ret)
7276                 return ret;
7277
7278         return hclge_set_vlan_rx_offload_cfg(vport);
7279 }
7280
7281 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7282 {
7283         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7284         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7285         struct hclge_desc desc;
7286         int status;
7287
7288         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7289         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7290         rx_req->ot_fst_vlan_type =
7291                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7292         rx_req->ot_sec_vlan_type =
7293                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7294         rx_req->in_fst_vlan_type =
7295                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7296         rx_req->in_sec_vlan_type =
7297                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7298
7299         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7300         if (status) {
7301                 dev_err(&hdev->pdev->dev,
7302                         "Send rxvlan protocol type command fail, ret =%d\n",
7303                         status);
7304                 return status;
7305         }
7306
7307         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7308
7309         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7310         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7311         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7312
7313         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7314         if (status)
7315                 dev_err(&hdev->pdev->dev,
7316                         "Send txvlan protocol type command fail, ret =%d\n",
7317                         status);
7318
7319         return status;
7320 }
7321
7322 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7323 {
7324 #define HCLGE_DEF_VLAN_TYPE             0x8100
7325
7326         struct hnae3_handle *handle = &hdev->vport[0].nic;
7327         struct hclge_vport *vport;
7328         int ret;
7329         int i;
7330
7331         if (hdev->pdev->revision >= 0x21) {
7332                 /* for revision 0x21, vf vlan filter is per function */
7333                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7334                         vport = &hdev->vport[i];
7335                         ret = hclge_set_vlan_filter_ctrl(hdev,
7336                                                          HCLGE_FILTER_TYPE_VF,
7337                                                          HCLGE_FILTER_FE_EGRESS,
7338                                                          true,
7339                                                          vport->vport_id);
7340                         if (ret)
7341                                 return ret;
7342                 }
7343
7344                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7345                                                  HCLGE_FILTER_FE_INGRESS, true,
7346                                                  0);
7347                 if (ret)
7348                         return ret;
7349         } else {
7350                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7351                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7352                                                  true, 0);
7353                 if (ret)
7354                         return ret;
7355         }
7356
7357         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7358
7359         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7360         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7361         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7362         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7363         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7364         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7365
7366         ret = hclge_set_vlan_protocol_type(hdev);
7367         if (ret)
7368                 return ret;
7369
7370         for (i = 0; i < hdev->num_alloc_vport; i++) {
7371                 u16 vlan_tag;
7372
7373                 vport = &hdev->vport[i];
7374                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7375
7376                 ret = hclge_vlan_offload_cfg(vport,
7377                                              vport->port_base_vlan_cfg.state,
7378                                              vlan_tag);
7379                 if (ret)
7380                         return ret;
7381         }
7382
7383         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7384 }
7385
7386 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7387                                        bool writen_to_tbl)
7388 {
7389         struct hclge_vport_vlan_cfg *vlan;
7390
7391         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7392         if (!vlan)
7393                 return;
7394
7395         vlan->hd_tbl_status = writen_to_tbl;
7396         vlan->vlan_id = vlan_id;
7397
7398         list_add_tail(&vlan->node, &vport->vlan_list);
7399 }
7400
7401 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7402 {
7403         struct hclge_vport_vlan_cfg *vlan, *tmp;
7404         struct hclge_dev *hdev = vport->back;
7405         int ret;
7406
7407         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7408                 if (!vlan->hd_tbl_status) {
7409                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7410                                                        vport->vport_id,
7411                                                        vlan->vlan_id, 0, false);
7412                         if (ret) {
7413                                 dev_err(&hdev->pdev->dev,
7414                                         "restore vport vlan list failed, ret=%d\n",
7415                                         ret);
7416                                 return ret;
7417                         }
7418                 }
7419                 vlan->hd_tbl_status = true;
7420         }
7421
7422         return 0;
7423 }
7424
7425 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7426                                       bool is_write_tbl)
7427 {
7428         struct hclge_vport_vlan_cfg *vlan, *tmp;
7429         struct hclge_dev *hdev = vport->back;
7430
7431         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7432                 if (vlan->vlan_id == vlan_id) {
7433                         if (is_write_tbl && vlan->hd_tbl_status)
7434                                 hclge_set_vlan_filter_hw(hdev,
7435                                                          htons(ETH_P_8021Q),
7436                                                          vport->vport_id,
7437                                                          vlan_id, 0,
7438                                                          true);
7439
7440                         list_del(&vlan->node);
7441                         kfree(vlan);
7442                         break;
7443                 }
7444         }
7445 }
7446
7447 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7448 {
7449         struct hclge_vport_vlan_cfg *vlan, *tmp;
7450         struct hclge_dev *hdev = vport->back;
7451
7452         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7453                 if (vlan->hd_tbl_status)
7454                         hclge_set_vlan_filter_hw(hdev,
7455                                                  htons(ETH_P_8021Q),
7456                                                  vport->vport_id,
7457                                                  vlan->vlan_id, 0,
7458                                                  true);
7459
7460                 vlan->hd_tbl_status = false;
7461                 if (is_del_list) {
7462                         list_del(&vlan->node);
7463                         kfree(vlan);
7464                 }
7465         }
7466 }
7467
7468 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7469 {
7470         struct hclge_vport_vlan_cfg *vlan, *tmp;
7471         struct hclge_vport *vport;
7472         int i;
7473
7474         mutex_lock(&hdev->vport_cfg_mutex);
7475         for (i = 0; i < hdev->num_alloc_vport; i++) {
7476                 vport = &hdev->vport[i];
7477                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7478                         list_del(&vlan->node);
7479                         kfree(vlan);
7480                 }
7481         }
7482         mutex_unlock(&hdev->vport_cfg_mutex);
7483 }
7484
7485 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7486 {
7487         struct hclge_vport *vport = hclge_get_vport(handle);
7488         struct hclge_vport_vlan_cfg *vlan, *tmp;
7489         struct hclge_dev *hdev = vport->back;
7490         u16 vlan_proto, qos;
7491         u16 state, vlan_id;
7492         int i;
7493
7494         mutex_lock(&hdev->vport_cfg_mutex);
7495         for (i = 0; i < hdev->num_alloc_vport; i++) {
7496                 vport = &hdev->vport[i];
7497                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7498                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7499                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7500                 state = vport->port_base_vlan_cfg.state;
7501
7502                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7503                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7504                                                  vport->vport_id, vlan_id, qos,
7505                                                  false);
7506                         continue;
7507                 }
7508
7509                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7510                         if (vlan->hd_tbl_status)
7511                                 hclge_set_vlan_filter_hw(hdev,
7512                                                          htons(ETH_P_8021Q),
7513                                                          vport->vport_id,
7514                                                          vlan->vlan_id, 0,
7515                                                          false);
7516                 }
7517         }
7518
7519         mutex_unlock(&hdev->vport_cfg_mutex);
7520 }
7521
7522 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7523 {
7524         struct hclge_vport *vport = hclge_get_vport(handle);
7525
7526         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7527                 vport->rxvlan_cfg.strip_tag1_en = false;
7528                 vport->rxvlan_cfg.strip_tag2_en = enable;
7529         } else {
7530                 vport->rxvlan_cfg.strip_tag1_en = enable;
7531                 vport->rxvlan_cfg.strip_tag2_en = true;
7532         }
7533         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7534         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7535         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7536
7537         return hclge_set_vlan_rx_offload_cfg(vport);
7538 }
7539
7540 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7541                                             u16 port_base_vlan_state,
7542                                             struct hclge_vlan_info *new_info,
7543                                             struct hclge_vlan_info *old_info)
7544 {
7545         struct hclge_dev *hdev = vport->back;
7546         int ret;
7547
7548         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7549                 hclge_rm_vport_all_vlan_table(vport, false);
7550                 return hclge_set_vlan_filter_hw(hdev,
7551                                                  htons(new_info->vlan_proto),
7552                                                  vport->vport_id,
7553                                                  new_info->vlan_tag,
7554                                                  new_info->qos, false);
7555         }
7556
7557         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7558                                        vport->vport_id, old_info->vlan_tag,
7559                                        old_info->qos, true);
7560         if (ret)
7561                 return ret;
7562
7563         return hclge_add_vport_all_vlan_table(vport);
7564 }
7565
7566 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7567                                     struct hclge_vlan_info *vlan_info)
7568 {
7569         struct hnae3_handle *nic = &vport->nic;
7570         struct hclge_vlan_info *old_vlan_info;
7571         struct hclge_dev *hdev = vport->back;
7572         int ret;
7573
7574         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7575
7576         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7577         if (ret)
7578                 return ret;
7579
7580         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7581                 /* add new VLAN tag */
7582                 ret = hclge_set_vlan_filter_hw(hdev,
7583                                                htons(vlan_info->vlan_proto),
7584                                                vport->vport_id,
7585                                                vlan_info->vlan_tag,
7586                                                vlan_info->qos, false);
7587                 if (ret)
7588                         return ret;
7589
7590                 /* remove old VLAN tag */
7591                 ret = hclge_set_vlan_filter_hw(hdev,
7592                                                htons(old_vlan_info->vlan_proto),
7593                                                vport->vport_id,
7594                                                old_vlan_info->vlan_tag,
7595                                                old_vlan_info->qos, true);
7596                 if (ret)
7597                         return ret;
7598
7599                 goto update;
7600         }
7601
7602         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7603                                                old_vlan_info);
7604         if (ret)
7605                 return ret;
7606
7607         /* update state only when disabling/enabling port based VLAN */
7608         vport->port_base_vlan_cfg.state = state;
7609         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7610                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7611         else
7612                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7613
7614 update:
7615         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7616         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7617         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7618
7619         return 0;
7620 }
7621
7622 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7623                                           enum hnae3_port_base_vlan_state state,
7624                                           u16 vlan)
7625 {
7626         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7627                 if (!vlan)
7628                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7629                 else
7630                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7631         } else {
7632                 if (!vlan)
7633                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7634                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7635                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7636                 else
7637                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7638         }
7639 }
7640
7641 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7642                                     u16 vlan, u8 qos, __be16 proto)
7643 {
7644         struct hclge_vport *vport = hclge_get_vport(handle);
7645         struct hclge_dev *hdev = vport->back;
7646         struct hclge_vlan_info vlan_info;
7647         u16 state;
7648         int ret;
7649
7650         if (hdev->pdev->revision == 0x20)
7651                 return -EOPNOTSUPP;
7652
7653         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7654         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7655                 return -EINVAL;
7656         if (proto != htons(ETH_P_8021Q))
7657                 return -EPROTONOSUPPORT;
7658
7659         vport = &hdev->vport[vfid];
7660         state = hclge_get_port_base_vlan_state(vport,
7661                                                vport->port_base_vlan_cfg.state,
7662                                                vlan);
7663         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7664                 return 0;
7665
7666         vlan_info.vlan_tag = vlan;
7667         vlan_info.qos = qos;
7668         vlan_info.vlan_proto = ntohs(proto);
7669
7670         /* update port based VLAN for PF */
7671         if (!vfid) {
7672                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7673                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7674                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7675
7676                 return ret;
7677         }
7678
7679         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7680                 return hclge_update_port_base_vlan_cfg(vport, state,
7681                                                        &vlan_info);
7682         } else {
7683                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7684                                                         (u8)vfid, state,
7685                                                         vlan, qos,
7686                                                         ntohs(proto));
7687                 return ret;
7688         }
7689 }
7690
7691 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7692                           u16 vlan_id, bool is_kill)
7693 {
7694         struct hclge_vport *vport = hclge_get_vport(handle);
7695         struct hclge_dev *hdev = vport->back;
7696         bool writen_to_tbl = false;
7697         int ret = 0;
7698
7699         /* when port based VLAN is enabled, we use the port based VLAN as the
7700          * VLAN filter entry. In this case, we don't update the VLAN filter table
7701          * when the user adds a new VLAN or removes an existing one, we just
7702          * update the vport VLAN list. The VLAN ids in the VLAN list won't be
7703          * written to the VLAN filter table until port based VLAN is disabled
7704          */
7705         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7706                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7707                                                vlan_id, 0, is_kill);
7708                 writen_to_tbl = true;
7709         }
7710
7711         if (ret)
7712                 return ret;
7713
7714         if (is_kill)
7715                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7716         else
7717                 hclge_add_vport_vlan_table(vport, vlan_id,
7718                                            writen_to_tbl);
7719
7720         return 0;
7721 }
7722
7723 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7724 {
7725         struct hclge_config_max_frm_size_cmd *req;
7726         struct hclge_desc desc;
7727
7728         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7729
7730         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7731         req->max_frm_size = cpu_to_le16(new_mps);
7732         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7733
7734         return hclge_cmd_send(&hdev->hw, &desc, 1);
7735 }
7736
7737 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7738 {
7739         struct hclge_vport *vport = hclge_get_vport(handle);
7740
7741         return hclge_set_vport_mtu(vport, new_mtu);
7742 }
7743
7744 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7745 {
7746         struct hclge_dev *hdev = vport->back;
7747         int i, max_frm_size, ret = 0;
7748
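        /* the hardware frame size is the MTU plus the Ethernet header, FCS
         * and room for two VLAN tags
         */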
7749         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7750         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7751             max_frm_size > HCLGE_MAC_MAX_FRAME)
7752                 return -EINVAL;
7753
7754         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7755         mutex_lock(&hdev->vport_lock);
7756         /* VF's mps must fit within hdev->mps */
7757         if (vport->vport_id && max_frm_size > hdev->mps) {
7758                 mutex_unlock(&hdev->vport_lock);
7759                 return -EINVAL;
7760         } else if (vport->vport_id) {
7761                 vport->mps = max_frm_size;
7762                 mutex_unlock(&hdev->vport_lock);
7763                 return 0;
7764         }
7765
7766         /* PF's mps must be greater than VF's mps */
7767         for (i = 1; i < hdev->num_alloc_vport; i++)
7768                 if (max_frm_size < hdev->vport[i].mps) {
7769                         mutex_unlock(&hdev->vport_lock);
7770                         return -EINVAL;
7771                 }
7772
7773         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7774
7775         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7776         if (ret) {
7777                 dev_err(&hdev->pdev->dev,
7778                         "Change mtu fail, ret =%d\n", ret);
7779                 goto out;
7780         }
7781
7782         hdev->mps = max_frm_size;
7783         vport->mps = max_frm_size;
7784
7785         ret = hclge_buffer_alloc(hdev);
7786         if (ret)
7787                 dev_err(&hdev->pdev->dev,
7788                         "Allocate buffer fail, ret =%d\n", ret);
7789
7790 out:
7791         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7792         mutex_unlock(&hdev->vport_lock);
7793         return ret;
7794 }
7795
7796 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7797                                     bool enable)
7798 {
7799         struct hclge_reset_tqp_queue_cmd *req;
7800         struct hclge_desc desc;
7801         int ret;
7802
7803         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7804
7805         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7806         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7807         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7808
7809         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7810         if (ret) {
7811                 dev_err(&hdev->pdev->dev,
7812                         "Send tqp reset cmd error, status =%d\n", ret);
7813                 return ret;
7814         }
7815
7816         return 0;
7817 }
7818
7819 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7820 {
7821         struct hclge_reset_tqp_queue_cmd *req;
7822         struct hclge_desc desc;
7823         int ret;
7824
7825         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7826
7827         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7828         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7829
7830         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7831         if (ret) {
7832                 dev_err(&hdev->pdev->dev,
7833                         "Get reset status error, status =%d\n", ret);
7834                 return ret;
7835         }
7836
7837         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7838 }
7839
7840 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7841 {
7842         struct hnae3_queue *queue;
7843         struct hclge_tqp *tqp;
7844
7845         queue = handle->kinfo.tqp[queue_id];
7846         tqp = container_of(queue, struct hclge_tqp, q);
7847
7848         return tqp->index;
7849 }
7850
7851 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7852 {
7853         struct hclge_vport *vport = hclge_get_vport(handle);
7854         struct hclge_dev *hdev = vport->back;
7855         int reset_try_times = 0;
7856         int reset_status;
7857         u16 queue_gid;
7858         int ret = 0;
7859
7860         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7861
7862         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7863         if (ret) {
7864                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7865                 return ret;
7866         }
7867
7868         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7869         if (ret) {
7870                 dev_err(&hdev->pdev->dev,
7871                         "Send reset tqp cmd fail, ret = %d\n", ret);
7872                 return ret;
7873         }
7874
7875         reset_try_times = 0;
7876         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7877                 /* Wait for tqp hw reset */
7878                 msleep(20);
7879                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7880                 if (reset_status)
7881                         break;
7882         }
7883
7884         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7885                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7886                 return ret;
7887         }
7888
7889         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7890         if (ret)
7891                 dev_err(&hdev->pdev->dev,
7892                         "Deassert the soft reset fail, ret = %d\n", ret);
7893
7894         return ret;
7895 }
7896
7897 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7898 {
7899         struct hclge_dev *hdev = vport->back;
7900         int reset_try_times = 0;
7901         int reset_status;
7902         u16 queue_gid;
7903         int ret;
7904
7905         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7906
7907         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7908         if (ret) {
7909                 dev_warn(&hdev->pdev->dev,
7910                          "Send reset tqp cmd fail, ret = %d\n", ret);
7911                 return;
7912         }
7913
7914         reset_try_times = 0;
7915         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7916                 /* Wait for tqp hw reset */
7917                 msleep(20);
7918                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7919                 if (reset_status)
7920                         break;
7921         }
7922
7923         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7924                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7925                 return;
7926         }
7927
7928         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7929         if (ret)
7930                 dev_warn(&hdev->pdev->dev,
7931                          "Deassert the soft reset fail, ret = %d\n", ret);
7932 }
7933
7934 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7935 {
7936         struct hclge_vport *vport = hclge_get_vport(handle);
7937         struct hclge_dev *hdev = vport->back;
7938
7939         return hdev->fw_version;
7940 }
7941
7942 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7943 {
7944         struct phy_device *phydev = hdev->hw.mac.phydev;
7945
7946         if (!phydev)
7947                 return;
7948
7949         phy_set_asym_pause(phydev, rx_en, tx_en);
7950 }
7951
7952 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7953 {
7954         int ret;
7955
7956         if (rx_en && tx_en)
7957                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7958         else if (rx_en && !tx_en)
7959                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7960         else if (!rx_en && tx_en)
7961                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7962         else
7963                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7964
7965         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7966                 return 0;
7967
7968         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7969         if (ret) {
7970                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7971                         ret);
7972                 return ret;
7973         }
7974
7975         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7976
7977         return 0;
7978 }
7979
7980 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7981 {
7982         struct phy_device *phydev = hdev->hw.mac.phydev;
7983         u16 remote_advertising = 0;
7984         u16 local_advertising = 0;
7985         u32 rx_pause, tx_pause;
7986         u8 flowctl;
7987
7988         if (!phydev->link || !phydev->autoneg)
7989                 return 0;
7990
7991         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7992
7993         if (phydev->pause)
7994                 remote_advertising = LPA_PAUSE_CAP;
7995
7996         if (phydev->asym_pause)
7997                 remote_advertising |= LPA_PAUSE_ASYM;
7998
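        /* resolve the final pause configuration from the local and link
         * partner advertisements
         */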
7999         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8000                                            remote_advertising);
8001         tx_pause = flowctl & FLOW_CTRL_TX;
8002         rx_pause = flowctl & FLOW_CTRL_RX;
8003
8004         if (phydev->duplex == HCLGE_MAC_HALF) {
8005                 tx_pause = 0;
8006                 rx_pause = 0;
8007         }
8008
8009         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8010 }
8011
8012 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8013                                  u32 *rx_en, u32 *tx_en)
8014 {
8015         struct hclge_vport *vport = hclge_get_vport(handle);
8016         struct hclge_dev *hdev = vport->back;
8017
8018         *auto_neg = hclge_get_autoneg(handle);
8019
8020         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8021                 *rx_en = 0;
8022                 *tx_en = 0;
8023                 return;
8024         }
8025
8026         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8027                 *rx_en = 1;
8028                 *tx_en = 0;
8029         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8030                 *tx_en = 1;
8031                 *rx_en = 0;
8032         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8033                 *rx_en = 1;
8034                 *tx_en = 1;
8035         } else {
8036                 *rx_en = 0;
8037                 *tx_en = 0;
8038         }
8039 }
8040
8041 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8042                                 u32 rx_en, u32 tx_en)
8043 {
8044         struct hclge_vport *vport = hclge_get_vport(handle);
8045         struct hclge_dev *hdev = vport->back;
8046         struct phy_device *phydev = hdev->hw.mac.phydev;
8047         u32 fc_autoneg;
8048
8049         fc_autoneg = hclge_get_autoneg(handle);
8050         if (auto_neg != fc_autoneg) {
8051                 dev_info(&hdev->pdev->dev,
8052                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8053                 return -EOPNOTSUPP;
8054         }
8055
8056         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8057                 dev_info(&hdev->pdev->dev,
8058                          "Priority flow control enabled. Cannot set link flow control.\n");
8059                 return -EOPNOTSUPP;
8060         }
8061
8062         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8063
8064         if (!fc_autoneg)
8065                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8066
8067         if (phydev)
8068                 return phy_start_aneg(phydev);
8069
8070         if (hdev->pdev->revision == 0x20)
8071                 return -EOPNOTSUPP;
8072
8073         return hclge_restart_autoneg(handle);
8074 }
8075
8076 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8077                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8078 {
8079         struct hclge_vport *vport = hclge_get_vport(handle);
8080         struct hclge_dev *hdev = vport->back;
8081
8082         if (speed)
8083                 *speed = hdev->hw.mac.speed;
8084         if (duplex)
8085                 *duplex = hdev->hw.mac.duplex;
8086         if (auto_neg)
8087                 *auto_neg = hdev->hw.mac.autoneg;
8088 }
8089
8090 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8091                                  u8 *module_type)
8092 {
8093         struct hclge_vport *vport = hclge_get_vport(handle);
8094         struct hclge_dev *hdev = vport->back;
8095
8096         if (media_type)
8097                 *media_type = hdev->hw.mac.media_type;
8098
8099         if (module_type)
8100                 *module_type = hdev->hw.mac.module_type;
8101 }
8102
8103 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8104                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8105 {
8106         struct hclge_vport *vport = hclge_get_vport(handle);
8107         struct hclge_dev *hdev = vport->back;
8108         struct phy_device *phydev = hdev->hw.mac.phydev;
8109         int mdix_ctrl, mdix, retval, is_resolved;
8110
8111         if (!phydev) {
8112                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8113                 *tp_mdix = ETH_TP_MDI_INVALID;
8114                 return;
8115         }
8116
8117         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8118
8119         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8120         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8121                                     HCLGE_PHY_MDIX_CTRL_S);
8122
8123         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8124         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8125         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8126
8127         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8128
8129         switch (mdix_ctrl) {
8130         case 0x0:
8131                 *tp_mdix_ctrl = ETH_TP_MDI;
8132                 break;
8133         case 0x1:
8134                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8135                 break;
8136         case 0x3:
8137                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8138                 break;
8139         default:
8140                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8141                 break;
8142         }
8143
8144         if (!is_resolved)
8145                 *tp_mdix = ETH_TP_MDI_INVALID;
8146         else if (mdix)
8147                 *tp_mdix = ETH_TP_MDI_X;
8148         else
8149                 *tp_mdix = ETH_TP_MDI;
8150 }
8151
8152 static void hclge_info_show(struct hclge_dev *hdev)
8153 {
8154         struct device *dev = &hdev->pdev->dev;
8155
8156         dev_info(dev, "PF info begin:\n");
8157
8158         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8159         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8160         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8161         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8162         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8163         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8164         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8165         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8166         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8167         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8168         dev_info(dev, "This is %s PF\n",
8169                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8170         dev_info(dev, "DCB %s\n",
8171                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8172         dev_info(dev, "MQPRIO %s\n",
8173                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8174
8175         dev_info(dev, "PF info end.\n");
8176 }
8177
8178 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8179                                           struct hclge_vport *vport)
8180 {
8181         struct hnae3_client *client = vport->nic.client;
8182         struct hclge_dev *hdev = ae_dev->priv;
8183         int ret;
8184
8185         ret = client->ops->init_instance(&vport->nic);
8186         if (ret)
8187                 return ret;
8188
8189         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8190         hnae3_set_client_init_flag(client, ae_dev, 1);
8191
8192         /* Enable nic hw error interrupts */
8193         ret = hclge_config_nic_hw_error(hdev, true);
8194         if (ret)
8195                 dev_err(&ae_dev->pdev->dev,
8196                         "fail(%d) to enable hw error interrupts\n", ret);
8197
8198         if (netif_msg_drv(&hdev->vport->nic))
8199                 hclge_info_show(hdev);
8200
8201         return ret;
8202 }
8203
8204 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8205                                            struct hclge_vport *vport)
8206 {
8207         struct hnae3_client *client = vport->roce.client;
8208         struct hclge_dev *hdev = ae_dev->priv;
8209         int ret;
8210
8211         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8212             !hdev->nic_client)
8213                 return 0;
8214
8215         client = hdev->roce_client;
8216         ret = hclge_init_roce_base_info(vport);
8217         if (ret)
8218                 return ret;
8219
8220         ret = client->ops->init_instance(&vport->roce);
8221         if (ret)
8222                 return ret;
8223
8224         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8225         hnae3_set_client_init_flag(client, ae_dev, 1);
8226
8227         return 0;
8228 }
8229
8230 static int hclge_init_client_instance(struct hnae3_client *client,
8231                                       struct hnae3_ae_dev *ae_dev)
8232 {
8233         struct hclge_dev *hdev = ae_dev->priv;
8234         struct hclge_vport *vport;
8235         int i, ret;
8236
8237         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8238                 vport = &hdev->vport[i];
8239
8240                 switch (client->type) {
8241                 case HNAE3_CLIENT_KNIC:
8242
8243                         hdev->nic_client = client;
8244                         vport->nic.client = client;
8245                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8246                         if (ret)
8247                                 goto clear_nic;
8248
8249                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8250                         if (ret)
8251                                 goto clear_roce;
8252
8253                         break;
8254                 case HNAE3_CLIENT_ROCE:
8255                         if (hnae3_dev_roce_supported(hdev)) {
8256                                 hdev->roce_client = client;
8257                                 vport->roce.client = client;
8258                         }
8259
8260                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8261                         if (ret)
8262                                 goto clear_roce;
8263
8264                         break;
8265                 default:
8266                         return -EINVAL;
8267                 }
8268         }
8269
8270         /* Enable roce ras interrupts */
8271         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8272         if (ret)
8273                 dev_err(&ae_dev->pdev->dev,
8274                         "fail(%d) to enable roce ras interrupts\n", ret);
8275
8276         return ret;
8277
8278 clear_nic:
8279         hdev->nic_client = NULL;
8280         vport->nic.client = NULL;
8281         return ret;
8282 clear_roce:
8283         hdev->roce_client = NULL;
8284         vport->roce.client = NULL;
8285         return ret;
8286 }
8287
8288 static void hclge_uninit_client_instance(struct hnae3_client *client,
8289                                          struct hnae3_ae_dev *ae_dev)
8290 {
8291         struct hclge_dev *hdev = ae_dev->priv;
8292         struct hclge_vport *vport;
8293         int i;
8294
8295         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8296                 vport = &hdev->vport[i];
8297                 if (hdev->roce_client) {
8298                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8299                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8300                                                                 0);
8301                         hdev->roce_client = NULL;
8302                         vport->roce.client = NULL;
8303                 }
8304                 if (client->type == HNAE3_CLIENT_ROCE)
8305                         return;
8306                 if (hdev->nic_client && client->ops->uninit_instance) {
8307                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8308                         client->ops->uninit_instance(&vport->nic, 0);
8309                         hdev->nic_client = NULL;
8310                         vport->nic.client = NULL;
8311                 }
8312         }
8313 }
8314
8315 static int hclge_pci_init(struct hclge_dev *hdev)
8316 {
8317         struct pci_dev *pdev = hdev->pdev;
8318         struct hclge_hw *hw;
8319         int ret;
8320
8321         ret = pci_enable_device(pdev);
8322         if (ret) {
8323                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8324                 return ret;
8325         }
8326
8327         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8328         if (ret) {
8329                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8330                 if (ret) {
8331                         dev_err(&pdev->dev,
8332                                 "can't set consistent PCI DMA\n");
8333                         goto err_disable_device;
8334                 }
8335                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8336         }
8337
8338         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8339         if (ret) {
8340                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8341                 goto err_disable_device;
8342         }
8343
8344         pci_set_master(pdev);
8345         hw = &hdev->hw;
8346         hw->io_base = pcim_iomap(pdev, 2, 0);
8347         if (!hw->io_base) {
8348                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8349                 ret = -ENOMEM;
8350                 goto err_clr_master;
8351         }
8352
8353         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8354
8355         return 0;
8356 err_clr_master:
8357         pci_clear_master(pdev);
8358         pci_release_regions(pdev);
8359 err_disable_device:
8360         pci_disable_device(pdev);
8361
8362         return ret;
8363 }
8364
8365 static void hclge_pci_uninit(struct hclge_dev *hdev)
8366 {
8367         struct pci_dev *pdev = hdev->pdev;
8368
8369         pcim_iounmap(pdev, hdev->hw.io_base);
8370         pci_free_irq_vectors(pdev);
8371         pci_clear_master(pdev);
8372         pci_release_mem_regions(pdev);
8373         pci_disable_device(pdev);
8374 }
8375
8376 static void hclge_state_init(struct hclge_dev *hdev)
8377 {
8378         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8379         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8380         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8381         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8382         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8383         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8384 }
8385
8386 static void hclge_state_uninit(struct hclge_dev *hdev)
8387 {
8388         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8389         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8390
8391         if (hdev->service_timer.function)
8392                 del_timer_sync(&hdev->service_timer);
8393         if (hdev->reset_timer.function)
8394                 del_timer_sync(&hdev->reset_timer);
8395         if (hdev->service_task.func)
8396                 cancel_work_sync(&hdev->service_task);
8397         if (hdev->rst_service_task.func)
8398                 cancel_work_sync(&hdev->rst_service_task);
8399         if (hdev->mbx_service_task.func)
8400                 cancel_work_sync(&hdev->mbx_service_task);
8401 }
8402
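/* Trigger a function reset for FLR and poll up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds) for the reset
 * task to bring the function down (HNAE3_FLR_DOWN set).
 */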
8403 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8404 {
8405 #define HCLGE_FLR_WAIT_MS       100
8406 #define HCLGE_FLR_WAIT_CNT      50
8407         struct hclge_dev *hdev = ae_dev->priv;
8408         int cnt = 0;
8409
8410         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8411         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8412         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8413         hclge_reset_event(hdev->pdev, NULL);
8414
8415         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8416                cnt++ < HCLGE_FLR_WAIT_CNT)
8417                 msleep(HCLGE_FLR_WAIT_MS);
8418
8419         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8420                 dev_err(&hdev->pdev->dev,
8421                         "flr wait down timeout: %d\n", cnt);
8422 }
8423
8424 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8425 {
8426         struct hclge_dev *hdev = ae_dev->priv;
8427
8428         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8429 }
8430
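/* Probe-time initialization: bring up PCI and the command queue,
 * allocate MSI-X vectors, TQPs and vports, then configure MAC, VLAN,
 * TM, RSS and the flow director before arming the service tasks and
 * the misc (vector0) interrupt.
 */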
8431 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8432 {
8433         struct pci_dev *pdev = ae_dev->pdev;
8434         struct hclge_dev *hdev;
8435         int ret;
8436
8437         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8438         if (!hdev) {
8439                 ret = -ENOMEM;
8440                 goto out;
8441         }
8442
8443         hdev->pdev = pdev;
8444         hdev->ae_dev = ae_dev;
8445         hdev->reset_type = HNAE3_NONE_RESET;
8446         hdev->reset_level = HNAE3_FUNC_RESET;
8447         ae_dev->priv = hdev;
8448         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8449
8450         mutex_init(&hdev->vport_lock);
8451         mutex_init(&hdev->vport_cfg_mutex);
8452         spin_lock_init(&hdev->fd_rule_lock);
8453
8454         ret = hclge_pci_init(hdev);
8455         if (ret) {
8456                 dev_err(&pdev->dev, "PCI init failed\n");
8457                 goto out;
8458         }
8459
8460         /* Initialize the firmware command queue */
8461         ret = hclge_cmd_queue_init(hdev);
8462         if (ret) {
8463                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8464                 goto err_pci_uninit;
8465         }
8466
8467         /* Initialize the firmware command */
8468         ret = hclge_cmd_init(hdev);
8469         if (ret)
8470                 goto err_cmd_uninit;
8471
8472         ret = hclge_get_cap(hdev);
8473         if (ret) {
8474                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8475                         ret);
8476                 goto err_cmd_uninit;
8477         }
8478
8479         ret = hclge_configure(hdev);
8480         if (ret) {
8481                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8482                 goto err_cmd_uninit;
8483         }
8484
8485         ret = hclge_init_msi(hdev);
8486         if (ret) {
8487                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8488                 goto err_cmd_uninit;
8489         }
8490
8491         ret = hclge_misc_irq_init(hdev);
8492         if (ret) {
8493                 dev_err(&pdev->dev,
8494                         "Misc IRQ(vector0) init error, ret = %d.\n",
8495                         ret);
8496                 goto err_msi_uninit;
8497         }
8498
8499         ret = hclge_alloc_tqps(hdev);
8500         if (ret) {
8501                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8502                 goto err_msi_irq_uninit;
8503         }
8504
8505         ret = hclge_alloc_vport(hdev);
8506         if (ret) {
8507                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8508                 goto err_msi_irq_uninit;
8509         }
8510
8511         ret = hclge_map_tqp(hdev);
8512         if (ret) {
8513                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8514                 goto err_msi_irq_uninit;
8515         }
8516
8517         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8518                 ret = hclge_mac_mdio_config(hdev);
8519                 if (ret) {
8520                         dev_err(&hdev->pdev->dev,
8521                                 "mdio config fail ret=%d\n", ret);
8522                         goto err_msi_irq_uninit;
8523                 }
8524         }
8525
8526         ret = hclge_init_umv_space(hdev);
8527         if (ret) {
8528                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8529                 goto err_mdiobus_unreg;
8530         }
8531
8532         ret = hclge_mac_init(hdev);
8533         if (ret) {
8534                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8535                 goto err_mdiobus_unreg;
8536         }
8537
8538         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8539         if (ret) {
8540                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8541                 goto err_mdiobus_unreg;
8542         }
8543
8544         ret = hclge_config_gro(hdev, true);
8545         if (ret)
8546                 goto err_mdiobus_unreg;
8547
8548         ret = hclge_init_vlan_config(hdev);
8549         if (ret) {
8550                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8551                 goto err_mdiobus_unreg;
8552         }
8553
8554         ret = hclge_tm_schd_init(hdev);
8555         if (ret) {
8556                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8557                 goto err_mdiobus_unreg;
8558         }
8559
8560         hclge_rss_init_cfg(hdev);
8561         ret = hclge_rss_init_hw(hdev);
8562         if (ret) {
8563                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8564                 goto err_mdiobus_unreg;
8565         }
8566
8567         ret = init_mgr_tbl(hdev);
8568         if (ret) {
8569                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8570                 goto err_mdiobus_unreg;
8571         }
8572
8573         ret = hclge_init_fd_config(hdev);
8574         if (ret) {
8575                 dev_err(&pdev->dev,
8576                         "fd table init fail, ret=%d\n", ret);
8577                 goto err_mdiobus_unreg;
8578         }
8579
8580         INIT_KFIFO(hdev->mac_tnl_log);
8581
8582         hclge_dcb_ops_set(hdev);
8583
8584         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8585         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8586         INIT_WORK(&hdev->service_task, hclge_service_task);
8587         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8588         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8589
8590         hclge_clear_all_event_cause(hdev);
8591
8592         /* Enable MISC vector(vector0) */
8593         hclge_enable_vector(&hdev->misc_vector, true);
8594
8595         hclge_state_init(hdev);
8596         hdev->last_reset_time = jiffies;
8597
8598         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8599         return 0;
8600
8601 err_mdiobus_unreg:
8602         if (hdev->hw.mac.phydev)
8603                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8604 err_msi_irq_uninit:
8605         hclge_misc_irq_uninit(hdev);
8606 err_msi_uninit:
8607         pci_free_irq_vectors(pdev);
8608 err_cmd_uninit:
8609         hclge_cmd_uninit(hdev);
8610 err_pci_uninit:
8611         pcim_iounmap(pdev, hdev->hw.io_base);
8612         pci_clear_master(pdev);
8613         pci_release_regions(pdev);
8614         pci_disable_device(pdev);
8615 out:
8616         return ret;
8617 }
8618
8619 static void hclge_stats_clear(struct hclge_dev *hdev)
8620 {
8621         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8622 }
8623
8624 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8625 {
8626         struct hclge_vport *vport = hdev->vport;
8627         int i;
8628
8629         for (i = 0; i < hdev->num_alloc_vport; i++) {
8630                 hclge_vport_stop(vport);
8631                 vport++;
8632         }
8633 }
8634
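/* Reinitialize the hardware after a reset: the command queue, TQP
 * mapping, MAC, VLAN, TM, RSS, flow director and error interrupts are
 * reprogrammed while the software state in hdev is reused.
 */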
8635 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8636 {
8637         struct hclge_dev *hdev = ae_dev->priv;
8638         struct pci_dev *pdev = ae_dev->pdev;
8639         int ret;
8640
8641         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8642
8643         hclge_stats_clear(hdev);
8644         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8645         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8646
8647         ret = hclge_cmd_init(hdev);
8648         if (ret) {
8649                 dev_err(&pdev->dev, "Cmd init failed, ret = %d\n", ret);
8650                 return ret;
8651         }
8652
8653         ret = hclge_map_tqp(hdev);
8654         if (ret) {
8655                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8656                 return ret;
8657         }
8658
8659         hclge_reset_umv_space(hdev);
8660
8661         ret = hclge_mac_init(hdev);
8662         if (ret) {
8663                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8664                 return ret;
8665         }
8666
8667         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8668         if (ret) {
8669                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8670                 return ret;
8671         }
8672
8673         ret = hclge_config_gro(hdev, true);
8674         if (ret)
8675                 return ret;
8676
8677         ret = hclge_init_vlan_config(hdev);
8678         if (ret) {
8679                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8680                 return ret;
8681         }
8682
8683         ret = hclge_tm_init_hw(hdev, true);
8684         if (ret) {
8685                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8686                 return ret;
8687         }
8688
8689         ret = hclge_rss_init_hw(hdev);
8690         if (ret) {
8691                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8692                 return ret;
8693         }
8694
8695         ret = hclge_init_fd_config(hdev);
8696         if (ret) {
8697                 dev_err(&pdev->dev,
8698                         "fd table init fail, ret=%d\n", ret);
8699                 return ret;
8700         }
8701
8702         /* Re-enable the hw error interrupts because
8703          * the interrupts get disabled on global reset.
8704          */
8705         ret = hclge_config_nic_hw_error(hdev, true);
8706         if (ret) {
8707                 dev_err(&pdev->dev,
8708                         "fail(%d) to re-enable NIC hw error interrupts\n",
8709                         ret);
8710                 return ret;
8711         }
8712
8713         if (hdev->roce_client) {
8714                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8715                 if (ret) {
8716                         dev_err(&pdev->dev,
8717                                 "fail(%d) to re-enable roce ras interrupts\n",
8718                                 ret);
8719                         return ret;
8720                 }
8721         }
8722
8723         hclge_reset_vport_state(hdev);
8724
8725         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8726                  HCLGE_DRIVER_NAME);
8727
8728         return 0;
8729 }
8730
8731 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8732 {
8733         struct hclge_dev *hdev = ae_dev->priv;
8734         struct hclge_mac *mac = &hdev->hw.mac;
8735
8736         hclge_state_uninit(hdev);
8737
8738         if (mac->phydev)
8739                 mdiobus_unregister(mac->mdio_bus);
8740
8741         hclge_uninit_umv_space(hdev);
8742
8743         /* Disable MISC vector(vector0) */
8744         hclge_enable_vector(&hdev->misc_vector, false);
8745         synchronize_irq(hdev->misc_vector.vector_irq);
8746
8747         /* Disable all hw interrupts */
8748         hclge_config_mac_tnl_int(hdev, false);
8749         hclge_config_nic_hw_error(hdev, false);
8750         hclge_config_rocee_ras_interrupt(hdev, false);
8751
8752         hclge_cmd_uninit(hdev);
8753         hclge_misc_irq_uninit(hdev);
8754         hclge_pci_uninit(hdev);
8755         mutex_destroy(&hdev->vport_lock);
8756         hclge_uninit_vport_mac_table(hdev);
8757         hclge_uninit_vport_vlan_table(hdev);
8758         mutex_destroy(&hdev->vport_cfg_mutex);
8759         ae_dev->priv = NULL;
8760 }
8761
8762 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8763 {
8764         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8765         struct hclge_vport *vport = hclge_get_vport(handle);
8766         struct hclge_dev *hdev = vport->back;
8767
8768         return min_t(u32, hdev->rss_size_max,
8769                      vport->alloc_tqps / kinfo->num_tc);
8770 }
8771
8772 static void hclge_get_channels(struct hnae3_handle *handle,
8773                                struct ethtool_channels *ch)
8774 {
8775         ch->max_combined = hclge_get_max_channels(handle);
8776         ch->other_count = 1;
8777         ch->max_other = 1;
8778         ch->combined_count = handle->kinfo.rss_size;
8779 }
8780
8781 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8782                                         u16 *alloc_tqps, u16 *max_rss_size)
8783 {
8784         struct hclge_vport *vport = hclge_get_vport(handle);
8785         struct hclge_dev *hdev = vport->back;
8786
8787         *alloc_tqps = vport->alloc_tqps;
8788         *max_rss_size = hdev->rss_size_max;
8789 }
8790
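/* Change the number of queues used for RSS: record the requested RSS
 * size, remap the vport TQPs, reprogram the RSS TC mode and refill the
 * RSS indirection table unless the user has configured one.
 */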
8791 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8792                               bool rxfh_configured)
8793 {
8794         struct hclge_vport *vport = hclge_get_vport(handle);
8795         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8796         struct hclge_dev *hdev = vport->back;
8797         int cur_rss_size = kinfo->rss_size;
8798         int cur_tqps = kinfo->num_tqps;
8799         u16 tc_offset[HCLGE_MAX_TC_NUM];
8800         u16 tc_valid[HCLGE_MAX_TC_NUM];
8801         u16 tc_size[HCLGE_MAX_TC_NUM];
8802         u16 roundup_size;
8803         u32 *rss_indir;
8804         int ret, i;
8805
8806         kinfo->req_rss_size = new_tqps_num;
8807
8808         ret = hclge_tm_vport_map_update(hdev);
8809         if (ret) {
8810                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8811                 return ret;
8812         }
8813
8814         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8815         roundup_size = ilog2(roundup_size);
8816         /* Set the RSS TC mode according to the new RSS size */
8817         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8818                 tc_valid[i] = 0;
8819
8820                 if (!(hdev->hw_tc_map & BIT(i)))
8821                         continue;
8822
8823                 tc_valid[i] = 1;
8824                 tc_size[i] = roundup_size;
8825                 tc_offset[i] = kinfo->rss_size * i;
8826         }
8827         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8828         if (ret)
8829                 return ret;
8830
8831         /* RSS indirection table has been configured by user */
8832         if (rxfh_configured)
8833                 goto out;
8834
8835         /* Reinitialize the RSS indirection table according to the new RSS size */
8836         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8837         if (!rss_indir)
8838                 return -ENOMEM;
8839
8840         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8841                 rss_indir[i] = i % kinfo->rss_size;
8842
8843         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8844         if (ret)
8845                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8846                         ret);
8847
8848         kfree(rss_indir);
8849
8850 out:
8851         if (!ret)
8852                 dev_info(&hdev->pdev->dev,
8853                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8854                          cur_rss_size, kinfo->rss_size,
8855                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8856
8857         return ret;
8858 }
8859
8860 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8861                               u32 *regs_num_64_bit)
8862 {
8863         struct hclge_desc desc;
8864         u32 total_num;
8865         int ret;
8866
8867         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8868         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8869         if (ret) {
8870                 dev_err(&hdev->pdev->dev,
8871                         "Query register number cmd failed, ret = %d.\n", ret);
8872                 return ret;
8873         }
8874
8875         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8876         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8877
8878         total_num = *regs_num_32_bit + *regs_num_64_bit;
8879         if (!total_num)
8880                 return -EINVAL;
8881
8882         return 0;
8883 }
8884
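/* Read the 32-bit registers reported by firmware. The first two data
 * words of the first descriptor are skipped, hence cmd_num is
 * DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM).
 */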
8885 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8886                                  void *data)
8887 {
8888 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8889
8890         struct hclge_desc *desc;
8891         u32 *reg_val = data;
8892         __le32 *desc_data;
8893         int cmd_num;
8894         int i, k, n;
8895         int ret;
8896
8897         if (regs_num == 0)
8898                 return 0;
8899
8900         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8901         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8902         if (!desc)
8903                 return -ENOMEM;
8904
8905         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8906         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8907         if (ret) {
8908                 dev_err(&hdev->pdev->dev,
8909                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8910                 kfree(desc);
8911                 return ret;
8912         }
8913
8914         for (i = 0; i < cmd_num; i++) {
8915                 if (i == 0) {
8916                         desc_data = (__le32 *)(&desc[i].data[0]);
8917                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8918                 } else {
8919                         desc_data = (__le32 *)(&desc[i]);
8920                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8921                 }
8922                 for (k = 0; k < n; k++) {
8923                         *reg_val++ = le32_to_cpu(*desc_data++);
8924
8925                         regs_num--;
8926                         if (!regs_num)
8927                                 break;
8928                 }
8929         }
8930
8931         kfree(desc);
8932         return 0;
8933 }
8934
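/* Read the 64-bit registers reported by firmware. Only the first
 * 64-bit word of the first descriptor is skipped, hence the "+ 1" in
 * the cmd_num calculation.
 */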
8935 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8936                                  void *data)
8937 {
8938 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8939
8940         struct hclge_desc *desc;
8941         u64 *reg_val = data;
8942         __le64 *desc_data;
8943         int cmd_num;
8944         int i, k, n;
8945         int ret;
8946
8947         if (regs_num == 0)
8948                 return 0;
8949
8950         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8951         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8952         if (!desc)
8953                 return -ENOMEM;
8954
8955         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8956         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8957         if (ret) {
8958                 dev_err(&hdev->pdev->dev,
8959                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8960                 kfree(desc);
8961                 return ret;
8962         }
8963
8964         for (i = 0; i < cmd_num; i++) {
8965                 if (i == 0) {
8966                         desc_data = (__le64 *)(&desc[i].data[0]);
8967                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8968                 } else {
8969                         desc_data = (__le64 *)(&desc[i]);
8970                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8971                 }
8972                 for (k = 0; k < n; k++) {
8973                         *reg_val++ = le64_to_cpu(*desc_data++);
8974
8975                         regs_num--;
8976                         if (!regs_num)
8977                                 break;
8978                 }
8979         }
8980
8981         kfree(desc);
8982         return 0;
8983 }
8984
8985 #define MAX_SEPARATE_NUM        4
8986 #define SEPARATOR_VALUE         0xFFFFFFFF
8987 #define REG_NUM_PER_LINE        4
8988 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8989
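/* The register dump holds the directly readable CMDQ, common, per-ring
 * and per-vector register lists, each rounded up to whole lines of
 * REG_LEN_PER_LINE bytes, followed by the 32-bit and 64-bit registers
 * queried from firmware.
 */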
8990 static int hclge_get_regs_len(struct hnae3_handle *handle)
8991 {
8992         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8993         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8994         struct hclge_vport *vport = hclge_get_vport(handle);
8995         struct hclge_dev *hdev = vport->back;
8996         u32 regs_num_32_bit, regs_num_64_bit;
8997         int ret;
8998
8999         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9000         if (ret) {
9001                 dev_err(&hdev->pdev->dev,
9002                         "Get register number failed, ret = %d.\n", ret);
9003                 return -EOPNOTSUPP;
9004         }
9005
9006         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9007         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9008         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9009         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9010
9011         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9012                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9013                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9014 }
9015
9016 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9017                            void *data)
9018 {
9019         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9020         struct hclge_vport *vport = hclge_get_vport(handle);
9021         struct hclge_dev *hdev = vport->back;
9022         u32 regs_num_32_bit, regs_num_64_bit;
9023         int i, j, reg_num, separator_num;
9024         u32 *reg = data;
9025         int ret;
9026
9027         *version = hdev->fw_version;
9028
9029         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9030         if (ret) {
9031                 dev_err(&hdev->pdev->dev,
9032                         "Get register number failed, ret = %d.\n", ret);
9033                 return;
9034         }
9035
9036         /* fetch per-PF register values from the PF PCIe register space */
9037         reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9038         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9039         for (i = 0; i < reg_num; i++)
9040                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9041         for (i = 0; i < separator_num; i++)
9042                 *reg++ = SEPARATOR_VALUE;
9043
9044         reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
9045         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9046         for (i = 0; i < reg_num; i++)
9047                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9048         for (i = 0; i < separator_num; i++)
9049                 *reg++ = SEPARATOR_VALUE;
9050
9051         reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
9052         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9053         for (j = 0; j < kinfo->num_tqps; j++) {
9054                 for (i = 0; i < reg_num; i++)
9055                         *reg++ = hclge_read_dev(&hdev->hw,
9056                                                 ring_reg_addr_list[i] +
9057                                                 0x200 * j);
9058                 for (i = 0; i < separator_num; i++)
9059                         *reg++ = SEPARATOR_VALUE;
9060         }
9061
9062         reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9063         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9064         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9065                 for (i = 0; i < reg_num; i++)
9066                         *reg++ = hclge_read_dev(&hdev->hw,
9067                                                 tqp_intr_reg_addr_list[i] +
9068                                                 4 * j);
9069                 for (i = 0; i < separator_num; i++)
9070                         *reg++ = SEPARATOR_VALUE;
9071         }
9072
9073         /* fetch PF common register values from firmware */
9074         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9075         if (ret) {
9076                 dev_err(&hdev->pdev->dev,
9077                         "Get 32 bit register failed, ret = %d.\n", ret);
9078                 return;
9079         }
9080
9081         reg += regs_num_32_bit;
9082         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9083         if (ret)
9084                 dev_err(&hdev->pdev->dev,
9085                         "Get 64 bit register failed, ret = %d.\n", ret);
9086 }
9087
9088 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9089 {
9090         struct hclge_set_led_state_cmd *req;
9091         struct hclge_desc desc;
9092         int ret;
9093
9094         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9095
9096         req = (struct hclge_set_led_state_cmd *)desc.data;
9097         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9098                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9099
9100         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9101         if (ret)
9102                 dev_err(&hdev->pdev->dev,
9103                         "Send set led state cmd error, ret =%d\n", ret);
9104
9105         return ret;
9106 }
9107
9108 enum hclge_led_status {
9109         HCLGE_LED_OFF,
9110         HCLGE_LED_ON,
9111         HCLGE_LED_NO_CHANGE = 0xFF,
9112 };
9113
9114 static int hclge_set_led_id(struct hnae3_handle *handle,
9115                             enum ethtool_phys_id_state status)
9116 {
9117         struct hclge_vport *vport = hclge_get_vport(handle);
9118         struct hclge_dev *hdev = vport->back;
9119
9120         switch (status) {
9121         case ETHTOOL_ID_ACTIVE:
9122                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9123         case ETHTOOL_ID_INACTIVE:
9124                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9125         default:
9126                 return -EINVAL;
9127         }
9128 }
9129
9130 static void hclge_get_link_mode(struct hnae3_handle *handle,
9131                                 unsigned long *supported,
9132                                 unsigned long *advertising)
9133 {
9134         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9135         struct hclge_vport *vport = hclge_get_vport(handle);
9136         struct hclge_dev *hdev = vport->back;
9137         unsigned int idx = 0;
9138
9139         for (; idx < size; idx++) {
9140                 supported[idx] = hdev->hw.mac.supported[idx];
9141                 advertising[idx] = hdev->hw.mac.advertising[idx];
9142         }
9143 }
9144
9145 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9146 {
9147         struct hclge_vport *vport = hclge_get_vport(handle);
9148         struct hclge_dev *hdev = vport->back;
9149
9150         return hclge_config_gro(hdev, enable);
9151 }
9152
9153 static const struct hnae3_ae_ops hclge_ops = {
9154         .init_ae_dev = hclge_init_ae_dev,
9155         .uninit_ae_dev = hclge_uninit_ae_dev,
9156         .flr_prepare = hclge_flr_prepare,
9157         .flr_done = hclge_flr_done,
9158         .init_client_instance = hclge_init_client_instance,
9159         .uninit_client_instance = hclge_uninit_client_instance,
9160         .map_ring_to_vector = hclge_map_ring_to_vector,
9161         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9162         .get_vector = hclge_get_vector,
9163         .put_vector = hclge_put_vector,
9164         .set_promisc_mode = hclge_set_promisc_mode,
9165         .set_loopback = hclge_set_loopback,
9166         .start = hclge_ae_start,
9167         .stop = hclge_ae_stop,
9168         .client_start = hclge_client_start,
9169         .client_stop = hclge_client_stop,
9170         .get_status = hclge_get_status,
9171         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9172         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9173         .get_media_type = hclge_get_media_type,
9174         .check_port_speed = hclge_check_port_speed,
9175         .get_fec = hclge_get_fec,
9176         .set_fec = hclge_set_fec,
9177         .get_rss_key_size = hclge_get_rss_key_size,
9178         .get_rss_indir_size = hclge_get_rss_indir_size,
9179         .get_rss = hclge_get_rss,
9180         .set_rss = hclge_set_rss,
9181         .set_rss_tuple = hclge_set_rss_tuple,
9182         .get_rss_tuple = hclge_get_rss_tuple,
9183         .get_tc_size = hclge_get_tc_size,
9184         .get_mac_addr = hclge_get_mac_addr,
9185         .set_mac_addr = hclge_set_mac_addr,
9186         .do_ioctl = hclge_do_ioctl,
9187         .add_uc_addr = hclge_add_uc_addr,
9188         .rm_uc_addr = hclge_rm_uc_addr,
9189         .add_mc_addr = hclge_add_mc_addr,
9190         .rm_mc_addr = hclge_rm_mc_addr,
9191         .set_autoneg = hclge_set_autoneg,
9192         .get_autoneg = hclge_get_autoneg,
9193         .restart_autoneg = hclge_restart_autoneg,
9194         .get_pauseparam = hclge_get_pauseparam,
9195         .set_pauseparam = hclge_set_pauseparam,
9196         .set_mtu = hclge_set_mtu,
9197         .reset_queue = hclge_reset_tqp,
9198         .get_stats = hclge_get_stats,
9199         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9200         .update_stats = hclge_update_stats,
9201         .get_strings = hclge_get_strings,
9202         .get_sset_count = hclge_get_sset_count,
9203         .get_fw_version = hclge_get_fw_version,
9204         .get_mdix_mode = hclge_get_mdix_mode,
9205         .enable_vlan_filter = hclge_enable_vlan_filter,
9206         .set_vlan_filter = hclge_set_vlan_filter,
9207         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9208         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9209         .reset_event = hclge_reset_event,
9210         .set_default_reset_request = hclge_set_def_reset_request,
9211         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9212         .set_channels = hclge_set_channels,
9213         .get_channels = hclge_get_channels,
9214         .get_regs_len = hclge_get_regs_len,
9215         .get_regs = hclge_get_regs,
9216         .set_led_id = hclge_set_led_id,
9217         .get_link_mode = hclge_get_link_mode,
9218         .add_fd_entry = hclge_add_fd_entry,
9219         .del_fd_entry = hclge_del_fd_entry,
9220         .del_all_fd_entries = hclge_del_all_fd_entries,
9221         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9222         .get_fd_rule_info = hclge_get_fd_rule_info,
9223         .get_fd_all_rules = hclge_get_all_rules,
9224         .restore_fd_rules = hclge_restore_fd_entries,
9225         .enable_fd = hclge_enable_fd,
9226         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9227         .dbg_run_cmd = hclge_dbg_run_cmd,
9228         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9229         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9230         .ae_dev_resetting = hclge_ae_dev_resetting,
9231         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9232         .set_gro_en = hclge_gro_en,
9233         .get_global_queue_id = hclge_covert_handle_qid_global,
9234         .set_timer_task = hclge_set_timer_task,
9235         .mac_connect_phy = hclge_mac_connect_phy,
9236         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9237         .restore_vlan_table = hclge_restore_vlan_table,
9238 };
9239
9240 static struct hnae3_ae_algo ae_algo = {
9241         .ops = &hclge_ops,
9242         .pdev_id_table = ae_algo_pci_tbl,
9243 };
9244
9245 static int hclge_init(void)
9246 {
9247         pr_info("%s is initializing\n", HCLGE_NAME);
9248
9249         hnae3_register_ae_algo(&ae_algo);
9250
9251         return 0;
9252 }
9253
9254 static void hclge_exit(void)
9255 {
9256         hnae3_unregister_ae_algo(&ae_algo);
9257 }
9258 module_init(hclge_init);
9259 module_exit(hclge_exit);
9260
9261 MODULE_LICENSE("GPL");
9262 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9263 MODULE_DESCRIPTION("HCLGE Driver");
9264 MODULE_VERSION(HCLGE_MOD_VERSION);