net: hns3: add handshake with hardware while doing reset
[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
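/* Legacy statistics path: read all MAC counters with a single
 * HCLGE_OPC_STATS_MAC command of HCLGE_MAC_CMD_NUM descriptors and
 * accumulate them into hdev->hw_stats.mac_stats. Only the first
 * descriptor carries the command head, so it holds fewer counters
 * than the following ones.
 */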
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
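/* Newer statistics path: the required descriptor count is queried from
 * firmware beforehand, then all MAC counters are read with one
 * HCLGE_OPC_STATS_MAC_ALL command and accumulated into
 * hdev->hw_stats.mac_stats.
 */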
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
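/* Query the number of MAC statistic registers from firmware and convert
 * it into the number of command descriptors needed: the head descriptor
 * holds 3 values, every following descriptor holds 4.
 */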
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
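/* Update MAC statistics, preferring the register-number based method and
 * falling back to the fixed-size command when the firmware does not
 * support it (-EOPNOTSUPP).
 */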
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
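/* Accumulate per-queue packet counters: one HCLGE_OPC_QUERY_RX_STATUS /
 * HCLGE_OPC_QUERY_TX_STATUS command is sent for each TQP and the result
 * is added to its tqp_stats.
 */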
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
505         return kinfo->num_tqps * (2);
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
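/* Return the number of strings for an ethtool stringset: for ETH_SS_TEST
 * the supported loopback tests are counted (and the matching
 * HNAE3_SUPPORT_*_LOOPBACK flags are set on the handle), for
 * ETH_SS_STATS the MAC and per-queue statistics are counted.
 */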
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only GE mode is supported
624          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
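/* Poll HCLGE_OPC_QUERY_FUNC_STATUS until the PF state is reported
 * (retrying a few times), then record whether this PF is the main PF.
 */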
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
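/* Query PF resources from firmware: number of TQPs, packet/TX/DV buffer
 * sizes and the MSI-X vector layout (NIC vectors first, RoCE vectors
 * after them when RoCE is supported).
 */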
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and Roce vectors,
803                  * NIC vectors are queued before Roce vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to support all speeds for GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
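/* Unpack the static configuration returned by HCLGE_OPC_GET_CFG_PARAM:
 * VMDq vport number, TC number, descriptor number, PHY address, media
 * type, RX buffer length, MAC address, default speed, RSS size, NUMA
 * node map, speed ability and UMV table space.
 */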
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Len is expressed in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimal number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
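/* Read the static configuration and initialise the corresponding
 * hclge_dev fields: queue and buffer sizes, MAC address, speed and link
 * modes, TC limits and, when running a kdump kernel, the minimal
 * resource configuration.
 */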
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Currently non-contiguous tc is not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
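/* Program the minimum and maximum TSO MSS into hardware with the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */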
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
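/* Enable or disable hardware GRO via HCLGE_OPC_GRO_GENERIC_CONFIG;
 * a no-op on devices without GRO support.
 */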
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
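/* Assign up to @num_tqps of the still-unallocated hardware TQPs to this vport
 * and derive its rss_size from the number of TQPs available per TC.
 */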
1370 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
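/* Allocate one vport for each VMDq vport and each requested VF, plus one for
 * the PF itself. TQPs are split evenly across the vports, with the remainder
 * going to the main (PF) vport.
 */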
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer is allocated in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of pfc enabled TCs, which have private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of pfc disabled TCs, which have private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
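/* Check whether the rx buffer budget (@rx_all) can hold both the per-TC
 * private buffers already calculated and the required shared buffer. If it
 * can, record the shared buffer size and the per-TC high/low thresholds in
 * @buf_alloc and return true; otherwise return false.
 */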
1665 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
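/* Calculate the rx private buffer watermarks and sizes for every enabled TC,
 * using larger (@max == true) or smaller watermarks, then check whether the
 * remaining packet buffer can still cover the shared buffer requirement.
 */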
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* clear private buffers starting from the last TC */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the no pfc TC private buffer */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* clear private buffers starting from the last TC */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Reduce the number of pfc TC with private buffer */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0: calculation successful, negative: fail
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Alloc private buffer TCs */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
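/* Calculate and program the packet buffer layout: tx buffers first, then the
 * rx private buffers, and (only when DCB is supported) the rx private
 * waterlines and common thresholds, finishing with the common waterline.
 */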
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
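/* Only 10M and 100M can use half duplex; force full duplex for all other
 * speeds.
 */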
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "Config mac speed dup fail ret=%d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431                 schedule_work(&hdev->rst_service_task);
2432 }
2433
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2435 {
2436         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439                 (void)schedule_work(&hdev->service_task);
2440 }
2441
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2443 {
2444         struct hclge_link_status_cmd *req;
2445         struct hclge_desc desc;
2446         int link_status;
2447         int ret;
2448
2449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2451         if (ret) {
2452                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453                         ret);
2454                 return ret;
2455         }
2456
2457         req = (struct hclge_link_status_cmd *)desc.data;
2458         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2459
2460         return !!link_status;
2461 }
2462
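/* Combine the MAC link status with the PHY state: when a PHY is attached the
 * link is only reported up if the PHY is running and reports link up.
 */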
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 {
2465         int mac_state;
2466         int link_stat;
2467
2468         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469                 return 0;
2470
2471         mac_state = hclge_get_mac_link_status(hdev);
2472
2473         if (hdev->hw.mac.phydev) {
2474                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475                         link_stat = mac_state &
2476                                 hdev->hw.mac.phydev->link;
2477                 else
2478                         link_stat = 0;
2479
2480         } else {
2481                 link_stat = mac_state;
2482         }
2483
2484         return !!link_stat;
2485 }
2486
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2488 {
2489         struct hnae3_client *rclient = hdev->roce_client;
2490         struct hnae3_client *client = hdev->nic_client;
2491         struct hnae3_handle *rhandle;
2492         struct hnae3_handle *handle;
2493         int state;
2494         int i;
2495
2496         if (!client)
2497                 return;
2498         state = hclge_get_mac_phy_link(hdev);
2499         if (state != hdev->hw.mac.link) {
2500                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501                         handle = &hdev->vport[i].nic;
2502                         client->ops->link_status_change(handle, state);
2503                         hclge_config_mac_tnl_int(hdev, state);
2504                         rhandle = &hdev->vport[i].roce;
2505                         if (rclient && rclient->ops->link_status_change)
2506                                 rclient->ops->link_status_change(rhandle,
2507                                                                  state);
2508                 }
2509                 hdev->hw.mac.link = state;
2510         }
2511 }
2512
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2514 {
2515         /* update fec ability by speed */
2516         hclge_convert_setting_fec(mac);
2517
2518         /* firmware cannot identify the backplane type, the media type
2519          * read from configuration can help to deal with it
2520          */
2521         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2526
2527         if (mac->support_autoneg) {
2528                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529                 linkmode_copy(mac->advertising, mac->supported);
2530         } else {
2531                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2532                                    mac->supported);
2533                 linkmode_zero(mac->advertising);
2534         }
2535 }
2536
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2538 {
2539         struct hclge_sfp_info_cmd *resp = NULL;
2540         struct hclge_desc desc;
2541         int ret;
2542
2543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544         resp = (struct hclge_sfp_info_cmd *)desc.data;
2545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546         if (ret == -EOPNOTSUPP) {
2547                 dev_warn(&hdev->pdev->dev,
2548                          "IMP does not support getting SFP speed %d\n", ret);
2549                 return ret;
2550         } else if (ret) {
2551                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2552                 return ret;
2553         }
2554
2555         *speed = le32_to_cpu(resp->speed);
2556
2557         return 0;
2558 }
2559
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2561 {
2562         struct hclge_sfp_info_cmd *resp;
2563         struct hclge_desc desc;
2564         int ret;
2565
2566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567         resp = (struct hclge_sfp_info_cmd *)desc.data;
2568
2569         resp->query_type = QUERY_ACTIVE_SPEED;
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret == -EOPNOTSUPP) {
2573                 dev_warn(&hdev->pdev->dev,
2574                          "IMP does not support getting SFP info %d\n", ret);
2575                 return ret;
2576         } else if (ret) {
2577                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2578                 return ret;
2579         }
2580
2581         mac->speed = le32_to_cpu(resp->speed);
2582         /* if resp->speed_ability is 0, it means it's an old firmware
2583          * version, so do not update these parameters
2584          */
2585         if (resp->speed_ability) {
2586                 mac->module_type = le32_to_cpu(resp->module_type);
2587                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588                 mac->autoneg = resp->autoneg;
2589                 mac->support_autoneg = resp->autoneg_ability;
2590                 if (!resp->active_fec)
2591                         mac->fec_mode = 0;
2592                 else
2593                         mac->fec_mode = BIT(resp->active_fec);
2594         } else {
2595                 mac->speed_type = QUERY_SFP_SPEED;
2596         }
2597
2598         return 0;
2599 }
2600
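/* Refresh port information for non-copper ports: newer hardware (revision
 * >= 0x21) queries full SFP info and, when supported, updates the port
 * capability, while older hardware only queries the SFP speed and then
 * forces full duplex.
 */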
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2602 {
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605         int ret;
2606
2607         /* get the port info from SFP cmd if not copper port */
2608         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609                 return 0;
2610
2611         /* if IMP does not support get SFP/qSFP info, return directly */
2612         if (!hdev->support_sfp_query)
2613                 return 0;
2614
2615         if (hdev->pdev->revision >= 0x21)
2616                 ret = hclge_get_sfp_info(hdev, mac);
2617         else
2618                 ret = hclge_get_sfp_speed(hdev, &speed);
2619
2620         if (ret == -EOPNOTSUPP) {
2621                 hdev->support_sfp_query = false;
2622                 return ret;
2623         } else if (ret) {
2624                 return ret;
2625         }
2626
2627         if (hdev->pdev->revision >= 0x21) {
2628                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629                         hclge_update_port_capability(mac);
2630                         return 0;
2631                 }
2632                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633                                                HCLGE_MAC_FULL);
2634         } else {
2635                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636                         return 0; /* do nothing if no SFP */
2637
2638                 /* must config full duplex for SFP */
2639                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2640         }
2641 }
2642
2643 static int hclge_get_status(struct hnae3_handle *handle)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         hclge_update_link_status(hdev);
2649
2650         return hdev->hw.mac.link;
2651 }
2652
2653 static void hclge_service_timer(struct timer_list *t)
2654 {
2655         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2656
2657         mod_timer(&hdev->service_timer, jiffies + HZ);
2658         hdev->hw_stats.stats_timer++;
2659         hdev->fd_arfs_expire_timer++;
2660         hclge_task_schedule(hdev);
2661 }
2662
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2664 {
2665         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2666
2667         /* Flush memory before next watchdog */
2668         smp_mb__before_atomic();
2669         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 }
2671
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2673 {
2674         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2675
2676         /* fetch the events from their corresponding regs */
2677         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679         msix_src_reg = hclge_read_dev(&hdev->hw,
2680                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2681
2682         /* Assumption: If by any chance reset and mailbox events are reported
2683          * together then we will only process the reset event in this go and
2684          * will defer the processing of the mailbox events. Since we would not
2685          * have cleared the RX CMDQ event this time, we would receive another
2686          * interrupt from H/W just for the mailbox.
2687          */
2688
2689         /* check for vector0 reset event sources */
2690         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695                 hdev->rst_stats.imp_rst_cnt++;
2696                 return HCLGE_VECTOR0_EVENT_RST;
2697         }
2698
2699         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704                 hdev->rst_stats.global_rst_cnt++;
2705                 return HCLGE_VECTOR0_EVENT_RST;
2706         }
2707
2708         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713                 hdev->rst_stats.core_rst_cnt++;
2714                 return HCLGE_VECTOR0_EVENT_RST;
2715         }
2716
2717         /* check for vector0 msix event source */
2718         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2720                         msix_src_reg);
2721                 return HCLGE_VECTOR0_EVENT_ERR;
2722         }
2723
2724         /* check for vector0 mailbox(=CMDQ RX) event source */
2725         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727                 *clearval = cmdq_src_reg;
2728                 return HCLGE_VECTOR0_EVENT_MBX;
2729         }
2730
2731         /* print other vector0 event source */
2732         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733                 cmdq_src_reg, msix_src_reg);
2734         return HCLGE_VECTOR0_EVENT_OTHER;
2735 }
2736
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2738                                     u32 regclr)
2739 {
2740         switch (event_type) {
2741         case HCLGE_VECTOR0_EVENT_RST:
2742                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2743                 break;
2744         case HCLGE_VECTOR0_EVENT_MBX:
2745                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2746                 break;
2747         default:
2748                 break;
2749         }
2750 }
2751
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2753 {
2754         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2759 }
2760
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2762 {
2763         writel(enable ? 1 : 0, vector->addr);
2764 }
2765
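/* Misc (vector 0) interrupt handler: the vector is masked on entry and
 * re-enabled here only for mailbox events; for reset and error events it is
 * re-enabled later by the reset service task once the cause has been handled.
 */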
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2767 {
2768         struct hclge_dev *hdev = data;
2769         u32 event_cause;
2770         u32 clearval;
2771
2772         hclge_enable_vector(&hdev->misc_vector, false);
2773         event_cause = hclge_check_event_cause(hdev, &clearval);
2774
2775         /* vector 0 interrupt is shared with reset and mailbox source events. */
2776         switch (event_cause) {
2777         case HCLGE_VECTOR0_EVENT_ERR:
2778                 /* we do not know what type of reset is required yet. This can
2779                  * only be decided after we fetch the type of errors which
2780                  * caused this event. Therefore, we will do the following for now:
2781                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782                  *    have deferred the choice of reset type.
2783                  * 2. Schedule the reset service task.
2784                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2785                  *    will fetch the correct type of reset. This is done
2786                  *    by first decoding the types of errors.
2787                  */
2788                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2789                 /* fall through */
2790         case HCLGE_VECTOR0_EVENT_RST:
2791                 hclge_reset_task_schedule(hdev);
2792                 break;
2793         case HCLGE_VECTOR0_EVENT_MBX:
2794                 /* If we are here then either:
2795                  * 1. We are not handling any mbx task and none is
2796                  *    scheduled,
2797                  *                        OR
2798                  * 2. We are handling a mbx task but nothing more is
2799                  *    scheduled.
2800                  * In both cases, we should schedule the mbx task as there are
2801                  * more mbx messages reported by this interrupt.
2802                  */
2803                 hclge_mbx_task_schedule(hdev);
2804                 break;
2805         default:
2806                 dev_warn(&hdev->pdev->dev,
2807                          "received unknown or unhandled event of vector0\n");
2808                 break;
2809         }
2810
2811         /* clear the source of interrupt if it is not caused by reset */
2812         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813                 hclge_clear_event_cause(hdev, event_cause, clearval);
2814                 hclge_enable_vector(&hdev->misc_vector, true);
2815         }
2816
2817         return IRQ_HANDLED;
2818 }
2819
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2821 {
2822         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "vector(vector_id %d) has been freed.\n", vector_id);
2825                 return;
2826         }
2827
2828         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829         hdev->num_msi_left += 1;
2830         hdev->num_msi_used -= 1;
2831 }
2832
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2834 {
2835         struct hclge_misc_vector *vector = &hdev->misc_vector;
2836
2837         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2838
2839         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840         hdev->vector_status[0] = 0;
2841
2842         hdev->num_msi_left -= 1;
2843         hdev->num_msi_used += 1;
2844 }
2845
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2847 {
2848         int ret;
2849
2850         hclge_get_misc_vector(hdev);
2851
2852         /* this would be explicitly freed in the end */
2853         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854                           0, "hclge_misc", hdev);
2855         if (ret) {
2856                 hclge_free_vector(hdev, 0);
2857                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858                         hdev->misc_vector.vector_irq);
2859         }
2860
2861         return ret;
2862 }
2863
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2865 {
2866         free_irq(hdev->misc_vector.vector_irq, hdev);
2867         hclge_free_vector(hdev, 0);
2868 }
2869
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871                         enum hnae3_reset_notify_type type)
2872 {
2873         struct hnae3_client *client = hdev->nic_client;
2874         u16 i;
2875
2876         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2877             !client)
2878                 return 0;
2879
2880         if (!client->ops->reset_notify)
2881                 return -EOPNOTSUPP;
2882
2883         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2884                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2885                 int ret;
2886
2887                 ret = client->ops->reset_notify(handle, type);
2888                 if (ret) {
2889                         dev_err(&hdev->pdev->dev,
2890                                 "notify nic client failed %d(%d)\n", type, ret);
2891                         return ret;
2892                 }
2893         }
2894
2895         return 0;
2896 }
2897
2898 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2899                                     enum hnae3_reset_notify_type type)
2900 {
2901         struct hnae3_client *client = hdev->roce_client;
2902         int ret = 0;
2903         u16 i;
2904
2905         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2906             !client)
2907                 return 0;
2908
2909         if (!client->ops->reset_notify)
2910                 return -EOPNOTSUPP;
2911
2912         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2913                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2914
2915                 ret = client->ops->reset_notify(handle, type);
2916                 if (ret) {
2917                         dev_err(&hdev->pdev->dev,
2918                                 "notify roce client failed %d(%d)",
2919                                 type, ret);
2920                         return ret;
2921                 }
2922         }
2923
2924         return ret;
2925 }
2926
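/* Poll the hardware reset status register until the relevant reset bit is
 * cleared (or, for FLR, until the FLR_DONE flag is set), checking every
 * HCLGE_RESET_WATI_MS up to HCLGE_RESET_WAIT_CNT times.
 */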
2927 static int hclge_reset_wait(struct hclge_dev *hdev)
2928 {
2929 #define HCLGE_RESET_WATI_MS     100
2930 #define HCLGE_RESET_WAIT_CNT    200
2931         u32 val, reg, reg_bit;
2932         u32 cnt = 0;
2933
2934         switch (hdev->reset_type) {
2935         case HNAE3_IMP_RESET:
2936                 reg = HCLGE_GLOBAL_RESET_REG;
2937                 reg_bit = HCLGE_IMP_RESET_BIT;
2938                 break;
2939         case HNAE3_GLOBAL_RESET:
2940                 reg = HCLGE_GLOBAL_RESET_REG;
2941                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2942                 break;
2943         case HNAE3_CORE_RESET:
2944                 reg = HCLGE_GLOBAL_RESET_REG;
2945                 reg_bit = HCLGE_CORE_RESET_BIT;
2946                 break;
2947         case HNAE3_FUNC_RESET:
2948                 reg = HCLGE_FUN_RST_ING;
2949                 reg_bit = HCLGE_FUN_RST_ING_B;
2950                 break;
2951         case HNAE3_FLR_RESET:
2952                 break;
2953         default:
2954                 dev_err(&hdev->pdev->dev,
2955                         "Wait for unsupported reset type: %d\n",
2956                         hdev->reset_type);
2957                 return -EINVAL;
2958         }
2959
2960         if (hdev->reset_type == HNAE3_FLR_RESET) {
2961                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2962                        cnt++ < HCLGE_RESET_WAIT_CNT)
2963                         msleep(HCLGE_RESET_WATI_MS);
2964
2965                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2966                         dev_err(&hdev->pdev->dev,
2967                                 "flr wait timeout: %d\n", cnt);
2968                         return -EBUSY;
2969                 }
2970
2971                 return 0;
2972         }
2973
2974         val = hclge_read_dev(&hdev->hw, reg);
2975         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2976                 msleep(HCLGE_RESET_WATI_MS);
2977                 val = hclge_read_dev(&hdev->hw, reg);
2978                 cnt++;
2979         }
2980
2981         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2982                 dev_warn(&hdev->pdev->dev,
2983                          "Wait for reset timeout: %d\n", hdev->reset_type);
2984                 return -EBUSY;
2985         }
2986
2987         return 0;
2988 }
2989
2990 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2991 {
2992         struct hclge_vf_rst_cmd *req;
2993         struct hclge_desc desc;
2994
2995         req = (struct hclge_vf_rst_cmd *)desc.data;
2996         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2997         req->dest_vfid = func_id;
2998
2999         if (reset)
3000                 req->vf_rst = 0x1;
3001
3002         return hclge_cmd_send(&hdev->hw, &desc, 1);
3003 }
3004
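/* Send the command that sets or clears FUNC_RST_ING for every VF vport (the
 * vports that follow the VMDq vports) and, when asserting reset, inform each
 * alive VF that the reset is coming.
 */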
3005 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3006 {
3007         int i;
3008
3009         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3010                 struct hclge_vport *vport = &hdev->vport[i];
3011                 int ret;
3012
3013                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3014                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3015                 if (ret) {
3016                         dev_err(&hdev->pdev->dev,
3017                                 "set vf(%d) rst failed %d!\n",
3018                                 vport->vport_id, ret);
3019                         return ret;
3020                 }
3021
3022                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3023                         continue;
3024
3025                 /* Inform VF to process the reset.
3026                  * hclge_inform_reset_assert_to_vf may fail if VF
3027                  * driver is not loaded.
3028                  */
3029                 ret = hclge_inform_reset_assert_to_vf(vport);
3030                 if (ret)
3031                         dev_warn(&hdev->pdev->dev,
3032                                  "inform reset to vf(%d) failed %d!\n",
3033                                  vport->vport_id, ret);
3034         }
3035
3036         return 0;
3037 }
3038
3039 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3040 {
3041         struct hclge_desc desc;
3042         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3043         int ret;
3044
3045         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3046         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3047         req->fun_reset_vfid = func_id;
3048
3049         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3050         if (ret)
3051                 dev_err(&hdev->pdev->dev,
3052                         "send function reset cmd fail, status =%d\n", ret);
3053
3054         return ret;
3055 }
3056
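/* Trigger the requested reset: global and core resets are requested by
 * setting the corresponding bit in HCLGE_GLOBAL_RESET_REG, while PF and FLR
 * resets are queued on reset_pending and handled by the reset service task.
 */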
3057 static void hclge_do_reset(struct hclge_dev *hdev)
3058 {
3059         struct hnae3_handle *handle = &hdev->vport[0].nic;
3060         struct pci_dev *pdev = hdev->pdev;
3061         u32 val;
3062
3063         if (hclge_get_hw_reset_stat(handle)) {
3064                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3065                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3066                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3067                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3068                 return;
3069         }
3070
3071         switch (hdev->reset_type) {
3072         case HNAE3_GLOBAL_RESET:
3073                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3074                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3075                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3076                 dev_info(&pdev->dev, "Global Reset requested\n");
3077                 break;
3078         case HNAE3_CORE_RESET:
3079                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3080                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3081                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3082                 dev_info(&pdev->dev, "Core Reset requested\n");
3083                 break;
3084         case HNAE3_FUNC_RESET:
3085                 dev_info(&pdev->dev, "PF Reset requested\n");
3086                 /* schedule again to check later */
3087                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3088                 hclge_reset_task_schedule(hdev);
3089                 break;
3090         case HNAE3_FLR_RESET:
3091                 dev_info(&pdev->dev, "FLR requested\n");
3092                 /* schedule again to check later */
3093                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3094                 hclge_reset_task_schedule(hdev);
3095                 break;
3096         default:
3097                 dev_warn(&pdev->dev,
3098                          "Unsupported reset type: %d\n", hdev->reset_type);
3099                 break;
3100         }
3101 }
3102
3103 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3104                                                    unsigned long *addr)
3105 {
3106         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3107
3108         /* first, resolve any unknown reset type to the known type(s) */
3109         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3110                 /* we will intentionally ignore any errors from this function
3111                  * as we will end up in *some* reset request in any case
3112                  */
3113                 hclge_handle_hw_msix_error(hdev, addr);
3114                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3115                 /* We deferred the clearing of the error event which caused
3116                  * the interrupt since it was not possible to do that in
3117                  * interrupt context (and this is the reason we introduced the
3118                  * new UNKNOWN reset type). Now that the errors have been
3119                  * handled and cleared in hardware, we can safely enable
3120                  * interrupts. This is an exception to the norm.
3121                  */
3122                 hclge_enable_vector(&hdev->misc_vector, true);
3123         }
3124
3125         /* return the highest priority reset level amongst all */
3126         if (test_bit(HNAE3_IMP_RESET, addr)) {
3127                 rst_level = HNAE3_IMP_RESET;
3128                 clear_bit(HNAE3_IMP_RESET, addr);
3129                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130                 clear_bit(HNAE3_CORE_RESET, addr);
3131                 clear_bit(HNAE3_FUNC_RESET, addr);
3132         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3133                 rst_level = HNAE3_GLOBAL_RESET;
3134                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3135                 clear_bit(HNAE3_CORE_RESET, addr);
3136                 clear_bit(HNAE3_FUNC_RESET, addr);
3137         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3138                 rst_level = HNAE3_CORE_RESET;
3139                 clear_bit(HNAE3_CORE_RESET, addr);
3140                 clear_bit(HNAE3_FUNC_RESET, addr);
3141         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3142                 rst_level = HNAE3_FUNC_RESET;
3143                 clear_bit(HNAE3_FUNC_RESET, addr);
3144         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3145                 rst_level = HNAE3_FLR_RESET;
3146                 clear_bit(HNAE3_FLR_RESET, addr);
3147         }
3148
3149         if (hdev->reset_type != HNAE3_NONE_RESET &&
3150             rst_level < hdev->reset_type)
3151                 return HNAE3_NONE_RESET;
3152
3153         return rst_level;
3154 }
3155
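/* Clear the interrupt source of the completed reset and re-enable the misc
 * vector, which was left masked by the misc IRQ handler for reset events.
 */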
3156 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3157 {
3158         u32 clearval = 0;
3159
3160         switch (hdev->reset_type) {
3161         case HNAE3_IMP_RESET:
3162                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3163                 break;
3164         case HNAE3_GLOBAL_RESET:
3165                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3166                 break;
3167         case HNAE3_CORE_RESET:
3168                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3169                 break;
3170         default:
3171                 break;
3172         }
3173
3174         if (!clearval)
3175                 return;
3176
3177         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3178         hclge_enable_vector(&hdev->misc_vector, true);
3179 }
3180
3181 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3182 {
3183         int ret = 0;
3184
3185         switch (hdev->reset_type) {
3186         case HNAE3_FUNC_RESET:
3187                 /* fall through */
3188         case HNAE3_FLR_RESET:
3189                 ret = hclge_set_all_vf_rst(hdev, true);
3190                 break;
3191         default:
3192                 break;
3193         }
3194
3195         return ret;
3196 }
3197
3198 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3199 {
3200 #define HCLGE_RESET_SYNC_TIME 100
3201
3202         u32 reg_val;
3203         int ret = 0;
3204
3205         switch (hdev->reset_type) {
3206         case HNAE3_FUNC_RESET:
3207                 /* There is no mechanism for the PF to know if the VF has
3208                  * stopped IO for now, so just wait 100 ms for the VF to stop IO
3209                  */
3210                 msleep(HCLGE_RESET_SYNC_TIME);
3211                 ret = hclge_func_reset_cmd(hdev, 0);
3212                 if (ret) {
3213                         dev_err(&hdev->pdev->dev,
3214                                 "asserting function reset fail %d!\n", ret);
3215                         return ret;
3216                 }
3217
3218                 /* After performing pf reset, it is not necessary to do the
3219                  * mailbox handling or send any command to firmware, because
3220                  * any mailbox handling or command to firmware is only valid
3221                  * after hclge_cmd_init is called.
3222                  */
3223                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3224                 hdev->rst_stats.pf_rst_cnt++;
3225                 break;
3226         case HNAE3_FLR_RESET:
3227                 /* There is no mechanism for the PF to know if the VF has
3228                  * stopped IO for now, so just wait 100 ms for the VF to stop IO
3229                  */
3230                 msleep(HCLGE_RESET_SYNC_TIME);
3231                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3232                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3233                 hdev->rst_stats.flr_rst_cnt++;
3234                 break;
3235         case HNAE3_IMP_RESET:
3236                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3237                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3238                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3239                 break;
3240         default:
3241                 break;
3242         }
3243
3244         /* inform hardware that preparatory work is done */
3245         msleep(HCLGE_RESET_SYNC_TIME);
3246         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3247                         HCLGE_NIC_CMQ_ENABLE);
3248         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3249
3250         return ret;
3251 }
3252
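/* Decide what to do after a failed reset: re-schedule if another reset is
 * already pending or if we simply timed out waiting for hardware; otherwise
 * clear the cause and arm the reset timer to upgrade the reset level, giving
 * up for good after MAX_RESET_FAIL_CNT consecutive failures.
 */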
3253 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3254 {
3255 #define MAX_RESET_FAIL_CNT 5
3256 #define RESET_UPGRADE_DELAY_SEC 10
3257
3258         if (hdev->reset_pending) {
3259                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3260                          hdev->reset_pending);
3261                 return true;
3262         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3263                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3264                     BIT(HCLGE_IMP_RESET_BIT))) {
3265                 dev_info(&hdev->pdev->dev,
3266                          "reset failed because IMP Reset is pending\n");
3267                 hclge_clear_reset_cause(hdev);
3268                 return false;
3269         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3270                 hdev->reset_fail_cnt++;
3271                 if (is_timeout) {
3272                         set_bit(hdev->reset_type, &hdev->reset_pending);
3273                         dev_info(&hdev->pdev->dev,
3274                                  "re-schedule to wait for hw reset done\n");
3275                         return true;
3276                 }
3277
3278                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3279                 hclge_clear_reset_cause(hdev);
3280                 mod_timer(&hdev->reset_timer,
3281                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3282
3283                 return false;
3284         }
3285
3286         hclge_clear_reset_cause(hdev);
3287         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3288         return false;
3289 }
3290
3291 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3292 {
3293         int ret = 0;
3294
3295         switch (hdev->reset_type) {
3296         case HNAE3_FUNC_RESET:
3297                 /* fall through */
3298         case HNAE3_FLR_RESET:
3299                 ret = hclge_set_all_vf_rst(hdev, false);
3300                 break;
3301         default:
3302                 break;
3303         }
3304
3305         return ret;
3306 }
3307
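/* Core reset sequence: bring the RoCE and NIC clients down, prepare and
 * assert the reset, wait for hardware to finish, then uninit/reinit the ae
 * device and clients and bring everything back up. Any failure is handed to
 * hclge_reset_err_handle(), which may re-schedule the reset task.
 */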
3308 static void hclge_reset(struct hclge_dev *hdev)
3309 {
3310         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3311         bool is_timeout = false;
3312         int ret;
3313
3314         /* Initialize ae_dev reset status as well, in case enet layer wants to
3315          * know if device is undergoing reset
3316          */
3317         ae_dev->reset_type = hdev->reset_type;
3318         hdev->rst_stats.reset_cnt++;
3319         /* perform reset of the stack & ae device for a client */
3320         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3321         if (ret)
3322                 goto err_reset;
3323
3324         ret = hclge_reset_prepare_down(hdev);
3325         if (ret)
3326                 goto err_reset;
3327
3328         rtnl_lock();
3329         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3330         if (ret)
3331                 goto err_reset_lock;
3332
3333         rtnl_unlock();
3334
3335         ret = hclge_reset_prepare_wait(hdev);
3336         if (ret)
3337                 goto err_reset;
3338
3339         if (hclge_reset_wait(hdev)) {
3340                 is_timeout = true;
3341                 goto err_reset;
3342         }
3343
3344         hdev->rst_stats.hw_reset_done_cnt++;
3345
3346         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3347         if (ret)
3348                 goto err_reset;
3349
3350         rtnl_lock();
3351         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3352         if (ret)
3353                 goto err_reset_lock;
3354
3355         ret = hclge_reset_ae_dev(hdev->ae_dev);
3356         if (ret)
3357                 goto err_reset_lock;
3358
3359         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3360         if (ret)
3361                 goto err_reset_lock;
3362
3363         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3364         if (ret)
3365                 goto err_reset_lock;
3366
3367         hclge_clear_reset_cause(hdev);
3368
3369         ret = hclge_reset_prepare_up(hdev);
3370         if (ret)
3371                 goto err_reset_lock;
3372
3373         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3374         if (ret)
3375                 goto err_reset_lock;
3376
3377         rtnl_unlock();
3378
3379         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3380         if (ret)
3381                 goto err_reset;
3382
3383         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3384         if (ret)
3385                 goto err_reset;
3386
3387         hdev->last_reset_time = jiffies;
3388         hdev->reset_fail_cnt = 0;
3389         hdev->rst_stats.reset_done_cnt++;
3390         ae_dev->reset_type = HNAE3_NONE_RESET;
3391         del_timer(&hdev->reset_timer);
3392
3393         return;
3394
3395 err_reset_lock:
3396         rtnl_unlock();
3397 err_reset:
3398         if (hclge_reset_err_handle(hdev, is_timeout))
3399                 hclge_reset_task_schedule(hdev);
3400 }
3401
3402 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3403 {
3404         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3405         struct hclge_dev *hdev = ae_dev->priv;
3406
3407         /* We might end up getting called broadly because of the 2 cases below:
3408          * 1. A recoverable error was conveyed through APEI and the only way to
3409          *    bring back normalcy is to reset.
3410          * 2. A new reset request from the stack due to timeout.
3411          *
3412          * For the first case, the error event might not have an ae handle
3413          * available. Check if this is a new reset request and we are not here
3414          * just because the last reset attempt did not succeed and the watchdog
3415          * hit us again. We will know this if the last reset request did not
3416          * occur very recently (watchdog timer = 5*HZ, let us check after a
3417          * sufficiently large time, say 4*5*HZ). In case of a new request we
3418          * reset the "reset level" to PF reset. And if it is a repeat reset
3419          * request of the most recent one then we want to make sure we throttle
3420          * the reset request. Therefore, we will not allow it again within 3*HZ.
3421          */
3422         if (!handle)
3423                 handle = &hdev->vport[0].nic;
3424
3425         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3426                 return;
3427         else if (hdev->default_reset_request)
3428                 hdev->reset_level =
3429                         hclge_get_reset_level(hdev,
3430                                               &hdev->default_reset_request);
3431         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3432                 hdev->reset_level = HNAE3_FUNC_RESET;
3433
3434         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3435                  hdev->reset_level);
3436
3437         /* request reset & schedule reset task */
3438         set_bit(hdev->reset_level, &hdev->reset_request);
3439         hclge_reset_task_schedule(hdev);
3440
3441         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3442                 hdev->reset_level++;
3443 }
3444
3445 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3446                                         enum hnae3_reset_type rst_type)
3447 {
3448         struct hclge_dev *hdev = ae_dev->priv;
3449
3450         set_bit(rst_type, &hdev->default_reset_request);
3451 }
3452
3453 static void hclge_reset_timer(struct timer_list *t)
3454 {
3455         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3456
3457         dev_info(&hdev->pdev->dev,
3458                  "triggering global reset in reset timer\n");
3459         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3460         hclge_reset_event(hdev->pdev, NULL);
3461 }
3462
3463 static void hclge_reset_subtask(struct hclge_dev *hdev)
3464 {
3465         /* check if there is any ongoing reset in the hardware. This status can
3466          * be checked from reset_pending. If there is, we need to wait for the
3467          * hardware to complete the reset.
3468          *    a. If we are able to figure out in reasonable time that the
3469          *       hardware has fully reset, we can proceed with driver and
3470          *       client reset.
3471          *    b. else, we can come back later to check this status, so re-sched
3472          *       now.
3473          */
3474         hdev->last_reset_time = jiffies;
3475         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3476         if (hdev->reset_type != HNAE3_NONE_RESET)
3477                 hclge_reset(hdev);
3478
3479         /* check if we got any *new* reset requests to be honored */
3480         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3481         if (hdev->reset_type != HNAE3_NONE_RESET)
3482                 hclge_do_reset(hdev);
3483
3484         hdev->reset_type = HNAE3_NONE_RESET;
3485 }
3486
3487 static void hclge_reset_service_task(struct work_struct *work)
3488 {
3489         struct hclge_dev *hdev =
3490                 container_of(work, struct hclge_dev, rst_service_task);
3491
3492         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3493                 return;
3494
3495         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3496
3497         hclge_reset_subtask(hdev);
3498
3499         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3500 }
3501
3502 static void hclge_mailbox_service_task(struct work_struct *work)
3503 {
3504         struct hclge_dev *hdev =
3505                 container_of(work, struct hclge_dev, mbx_service_task);
3506
3507         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3508                 return;
3509
3510         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3511
3512         hclge_mbx_handler(hdev);
3513
3514         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3515 }
3516
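/* Clear the ALIVE flag for any VF vport whose last_active_jiffies is more
 * than 8 * HZ old, and reset the MPS of any non-alive vport to the default
 * frame size.
 */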
3517 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3518 {
3519         int i;
3520
3521         /* start from vport 1 because the PF is always alive */
3522         for (i = 1; i < hdev->num_alloc_vport; i++) {
3523                 struct hclge_vport *vport = &hdev->vport[i];
3524
3525                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3526                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3527
3528                 /* If vf is not alive, set to default value */
3529                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3530                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3531         }
3532 }
3533
3534 static void hclge_service_task(struct work_struct *work)
3535 {
3536         struct hclge_dev *hdev =
3537                 container_of(work, struct hclge_dev, service_task);
3538
3539         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3540                 hclge_update_stats_for_all(hdev);
3541                 hdev->hw_stats.stats_timer = 0;
3542         }
3543
3544         hclge_update_port_info(hdev);
3545         hclge_update_link_status(hdev);
3546         hclge_update_vport_alive(hdev);
3547         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3548                 hclge_rfs_filter_expire(hdev);
3549                 hdev->fd_arfs_expire_timer = 0;
3550         }
3551         hclge_service_complete(hdev);
3552 }
3553
3554 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3555 {
3556         /* VF handle has no client */
3557         if (!handle->client)
3558                 return container_of(handle, struct hclge_vport, nic);
3559         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3560                 return container_of(handle, struct hclge_vport, roce);
3561         else
3562                 return container_of(handle, struct hclge_vport, nic);
3563 }
3564
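/* Allocate up to vector_num MSI vectors for the requesting vport. Index 0 is
 * reserved for the misc (reset/mailbox) vector, so the search starts at 1.
 */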
3565 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3566                             struct hnae3_vector_info *vector_info)
3567 {
3568         struct hclge_vport *vport = hclge_get_vport(handle);
3569         struct hnae3_vector_info *vector = vector_info;
3570         struct hclge_dev *hdev = vport->back;
3571         int alloc = 0;
3572         int i, j;
3573
3574         vector_num = min(hdev->num_msi_left, vector_num);
3575
3576         for (j = 0; j < vector_num; j++) {
3577                 for (i = 1; i < hdev->num_msi; i++) {
3578                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3579                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3580                                 vector->io_addr = hdev->hw.io_base +
3581                                         HCLGE_VECTOR_REG_BASE +
3582                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3583                                         vport->vport_id *
3584                                         HCLGE_VECTOR_VF_OFFSET;
3585                                 hdev->vector_status[i] = vport->vport_id;
3586                                 hdev->vector_irq[i] = vector->vector;
3587
3588                                 vector++;
3589                                 alloc++;
3590
3591                                 break;
3592                         }
3593                 }
3594         }
3595         hdev->num_msi_left -= alloc;
3596         hdev->num_msi_used += alloc;
3597
3598         return alloc;
3599 }
3600
3601 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3602 {
3603         int i;
3604
3605         for (i = 0; i < hdev->num_msi; i++)
3606                 if (vector == hdev->vector_irq[i])
3607                         return i;
3608
3609         return -EINVAL;
3610 }
3611
3612 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3613 {
3614         struct hclge_vport *vport = hclge_get_vport(handle);
3615         struct hclge_dev *hdev = vport->back;
3616         int vector_id;
3617
3618         vector_id = hclge_get_vector_index(hdev, vector);
3619         if (vector_id < 0) {
3620                 dev_err(&hdev->pdev->dev,
3621                         "Get vector index fail. vector_id =%d\n", vector_id);
3622                 return vector_id;
3623         }
3624
3625         hclge_free_vector(hdev, vector_id);
3626
3627         return 0;
3628 }
3629
3630 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3631 {
3632         return HCLGE_RSS_KEY_SIZE;
3633 }
3634
3635 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3636 {
3637         return HCLGE_RSS_IND_TBL_SIZE;
3638 }
3639
3640 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3641                                   const u8 hfunc, const u8 *key)
3642 {
3643         struct hclge_rss_config_cmd *req;
3644         struct hclge_desc desc;
3645         int key_offset;
3646         int key_size;
3647         int ret;
3648
3649         req = (struct hclge_rss_config_cmd *)desc.data;
3650
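	/* The RSS key is programmed in three chunks of HCLGE_RSS_HASH_KEY_NUM
	 * bytes each; the last descriptor carries whatever remains of the key.
	 */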
3651         for (key_offset = 0; key_offset < 3; key_offset++) {
3652                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3653                                            false);
3654
3655                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3656                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3657
3658                 if (key_offset == 2)
3659                         key_size =
3660                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3661                 else
3662                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3663
3664                 memcpy(req->hash_key,
3665                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3666
3667                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3668                 if (ret) {
3669                         dev_err(&hdev->pdev->dev,
3670                                 "Configure RSS config fail, status = %d\n",
3671                                 ret);
3672                         return ret;
3673                 }
3674         }
3675         return 0;
3676 }
3677
3678 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3679 {
3680         struct hclge_rss_indirection_table_cmd *req;
3681         struct hclge_desc desc;
3682         int i, j;
3683         int ret;
3684
3685         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3686
3687         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3688                 hclge_cmd_setup_basic_desc
3689                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3690
3691                 req->start_table_index =
3692                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3693                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3694
3695                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3696                         req->rss_result[j] =
3697                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3698
3699                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3700                 if (ret) {
3701                         dev_err(&hdev->pdev->dev,
3702                                 "Configure rss indir table fail,status = %d\n",
3703                                 ret);
3704                         return ret;
3705                 }
3706         }
3707         return 0;
3708 }
3709
3710 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3711                                  u16 *tc_size, u16 *tc_offset)
3712 {
3713         struct hclge_rss_tc_mode_cmd *req;
3714         struct hclge_desc desc;
3715         int ret;
3716         int i;
3717
3718         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3719         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3720
3721         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3722                 u16 mode = 0;
3723
3724                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3725                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3726                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3727                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3728                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3729
3730                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3731         }
3732
3733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3734         if (ret)
3735                 dev_err(&hdev->pdev->dev,
3736                         "Configure rss tc mode fail, status = %d\n", ret);
3737
3738         return ret;
3739 }
3740
3741 static void hclge_get_rss_type(struct hclge_vport *vport)
3742 {
3743         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3744             vport->rss_tuple_sets.ipv4_udp_en ||
3745             vport->rss_tuple_sets.ipv4_sctp_en ||
3746             vport->rss_tuple_sets.ipv6_tcp_en ||
3747             vport->rss_tuple_sets.ipv6_udp_en ||
3748             vport->rss_tuple_sets.ipv6_sctp_en)
3749                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3750         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3751                  vport->rss_tuple_sets.ipv6_fragment_en)
3752                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3753         else
3754                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3755 }
3756
3757 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3758 {
3759         struct hclge_rss_input_tuple_cmd *req;
3760         struct hclge_desc desc;
3761         int ret;
3762
3763         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3764
3765         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3766
3767         /* Get the tuple cfg from the PF */
3768         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3769         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3770         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3771         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3772         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3773         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3774         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3775         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3776         hclge_get_rss_type(&hdev->vport[0]);
3777         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3778         if (ret)
3779                 dev_err(&hdev->pdev->dev,
3780                         "Configure rss input fail, status = %d\n", ret);
3781         return ret;
3782 }
3783
3784 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3785                          u8 *key, u8 *hfunc)
3786 {
3787         struct hclge_vport *vport = hclge_get_vport(handle);
3788         int i;
3789
3790         /* Get hash algorithm */
3791         if (hfunc) {
3792                 switch (vport->rss_algo) {
3793                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3794                         *hfunc = ETH_RSS_HASH_TOP;
3795                         break;
3796                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3797                         *hfunc = ETH_RSS_HASH_XOR;
3798                         break;
3799                 default:
3800                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3801                         break;
3802                 }
3803         }
3804
3805         /* Get the RSS Key required by the user */
3806         if (key)
3807                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3808
3809         /* Get indirect table */
3810         if (indir)
3811                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3812                         indir[i] =  vport->rss_indirection_tbl[i];
3813
3814         return 0;
3815 }
3816
3817 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3818                          const  u8 *key, const  u8 hfunc)
3819 {
3820         struct hclge_vport *vport = hclge_get_vport(handle);
3821         struct hclge_dev *hdev = vport->back;
3822         u8 hash_algo;
3823         int ret, i;
3824
3825         /* Set the RSS Hash Key if specified by the user */
3826         if (key) {
3827                 switch (hfunc) {
3828                 case ETH_RSS_HASH_TOP:
3829                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3830                         break;
3831                 case ETH_RSS_HASH_XOR:
3832                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3833                         break;
3834                 case ETH_RSS_HASH_NO_CHANGE:
3835                         hash_algo = vport->rss_algo;
3836                         break;
3837                 default:
3838                         return -EINVAL;
3839                 }
3840
3841                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3842                 if (ret)
3843                         return ret;
3844
3845                 /* Update the shadow RSS key with the user specified key */
3846                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3847                 vport->rss_algo = hash_algo;
3848         }
3849
3850         /* Update the shadow RSS table with user specified qids */
3851         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3852                 vport->rss_indirection_tbl[i] = indir[i];
3853
3854         /* Update the hardware */
3855         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3856 }
3857
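/* Translate the ethtool RXH_* flags into the hardware tuple bits, e.g.
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 maps to
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT.
 */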
3858 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3859 {
3860         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3861
3862         if (nfc->data & RXH_L4_B_2_3)
3863                 hash_sets |= HCLGE_D_PORT_BIT;
3864         else
3865                 hash_sets &= ~HCLGE_D_PORT_BIT;
3866
3867         if (nfc->data & RXH_IP_SRC)
3868                 hash_sets |= HCLGE_S_IP_BIT;
3869         else
3870                 hash_sets &= ~HCLGE_S_IP_BIT;
3871
3872         if (nfc->data & RXH_IP_DST)
3873                 hash_sets |= HCLGE_D_IP_BIT;
3874         else
3875                 hash_sets &= ~HCLGE_D_IP_BIT;
3876
3877         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3878                 hash_sets |= HCLGE_V_TAG_BIT;
3879
3880         return hash_sets;
3881 }
3882
3883 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3884                                struct ethtool_rxnfc *nfc)
3885 {
3886         struct hclge_vport *vport = hclge_get_vport(handle);
3887         struct hclge_dev *hdev = vport->back;
3888         struct hclge_rss_input_tuple_cmd *req;
3889         struct hclge_desc desc;
3890         u8 tuple_sets;
3891         int ret;
3892
3893         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3894                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3895                 return -EINVAL;
3896
3897         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3898         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3899
3900         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3901         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3902         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3903         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3904         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3905         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3906         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3907         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3908
3909         tuple_sets = hclge_get_rss_hash_bits(nfc);
3910         switch (nfc->flow_type) {
3911         case TCP_V4_FLOW:
3912                 req->ipv4_tcp_en = tuple_sets;
3913                 break;
3914         case TCP_V6_FLOW:
3915                 req->ipv6_tcp_en = tuple_sets;
3916                 break;
3917         case UDP_V4_FLOW:
3918                 req->ipv4_udp_en = tuple_sets;
3919                 break;
3920         case UDP_V6_FLOW:
3921                 req->ipv6_udp_en = tuple_sets;
3922                 break;
3923         case SCTP_V4_FLOW:
3924                 req->ipv4_sctp_en = tuple_sets;
3925                 break;
3926         case SCTP_V6_FLOW:
3927                 if ((nfc->data & RXH_L4_B_0_1) ||
3928                     (nfc->data & RXH_L4_B_2_3))
3929                         return -EINVAL;
3930
3931                 req->ipv6_sctp_en = tuple_sets;
3932                 break;
3933         case IPV4_FLOW:
3934                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3935                 break;
3936         case IPV6_FLOW:
3937                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3938                 break;
3939         default:
3940                 return -EINVAL;
3941         }
3942
3943         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3944         if (ret) {
3945                 dev_err(&hdev->pdev->dev,
3946                         "Set rss tuple fail, status = %d\n", ret);
3947                 return ret;
3948         }
3949
3950         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3951         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3952         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3953         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3954         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3955         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3956         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3957         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3958         hclge_get_rss_type(vport);
3959         return 0;
3960 }
3961
3962 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3963                                struct ethtool_rxnfc *nfc)
3964 {
3965         struct hclge_vport *vport = hclge_get_vport(handle);
3966         u8 tuple_sets;
3967
3968         nfc->data = 0;
3969
3970         switch (nfc->flow_type) {
3971         case TCP_V4_FLOW:
3972                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3973                 break;
3974         case UDP_V4_FLOW:
3975                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3976                 break;
3977         case TCP_V6_FLOW:
3978                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3979                 break;
3980         case UDP_V6_FLOW:
3981                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3982                 break;
3983         case SCTP_V4_FLOW:
3984                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3985                 break;
3986         case SCTP_V6_FLOW:
3987                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3988                 break;
3989         case IPV4_FLOW:
3990         case IPV6_FLOW:
3991                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3992                 break;
3993         default:
3994                 return -EINVAL;
3995         }
3996
3997         if (!tuple_sets)
3998                 return 0;
3999
4000         if (tuple_sets & HCLGE_D_PORT_BIT)
4001                 nfc->data |= RXH_L4_B_2_3;
4002         if (tuple_sets & HCLGE_S_PORT_BIT)
4003                 nfc->data |= RXH_L4_B_0_1;
4004         if (tuple_sets & HCLGE_D_IP_BIT)
4005                 nfc->data |= RXH_IP_DST;
4006         if (tuple_sets & HCLGE_S_IP_BIT)
4007                 nfc->data |= RXH_IP_SRC;
4008
4009         return 0;
4010 }
4011
4012 static int hclge_get_tc_size(struct hnae3_handle *handle)
4013 {
4014         struct hclge_vport *vport = hclge_get_vport(handle);
4015         struct hclge_dev *hdev = vport->back;
4016
4017         return hdev->rss_size_max;
4018 }
4019
4020 int hclge_rss_init_hw(struct hclge_dev *hdev)
4021 {
4022         struct hclge_vport *vport = hdev->vport;
4023         u8 *rss_indir = vport[0].rss_indirection_tbl;
4024         u16 rss_size = vport[0].alloc_rss_size;
4025         u8 *key = vport[0].rss_hash_key;
4026         u8 hfunc = vport[0].rss_algo;
4027         u16 tc_offset[HCLGE_MAX_TC_NUM];
4028         u16 tc_valid[HCLGE_MAX_TC_NUM];
4029         u16 tc_size[HCLGE_MAX_TC_NUM];
4030         u16 roundup_size;
4031         int i, ret;
4032
4033         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4034         if (ret)
4035                 return ret;
4036
4037         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4038         if (ret)
4039                 return ret;
4040
4041         ret = hclge_set_rss_input_tuple(hdev);
4042         if (ret)
4043                 return ret;
4044
4045         /* Each TC has the same queue size, and the tc_size set to hardware is
4046          * the log2 of the roundup power of two of rss_size; the actual queue
4047          * size is limited by the indirection table.
4048          */
4049         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4050                 dev_err(&hdev->pdev->dev,
4051                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4052                         rss_size);
4053                 return -EINVAL;
4054         }
4055
4056         roundup_size = roundup_pow_of_two(rss_size);
4057         roundup_size = ilog2(roundup_size);
4058
4059         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4060                 tc_valid[i] = 0;
4061
4062                 if (!(hdev->hw_tc_map & BIT(i)))
4063                         continue;
4064
4065                 tc_valid[i] = 1;
4066                 tc_size[i] = roundup_size;
4067                 tc_offset[i] = rss_size * i;
4068         }
4069
4070         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4071 }
4072
4073 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4074 {
4075         struct hclge_vport *vport = hdev->vport;
4076         int i, j;
4077
4078         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4079                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4080                         vport[j].rss_indirection_tbl[i] =
4081                                 i % vport[j].alloc_rss_size;
4082         }
4083 }
4084
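/* Set the default RSS configuration for every vport: Toeplitz hashing on
 * revision 0x20 hardware and the simple (XOR) hash from revision 0x21 on,
 * SCTP-specific tuples for SCTP flows and the generic tuple set otherwise.
 */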
4085 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4086 {
4087         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4088         struct hclge_vport *vport = hdev->vport;
4089
4090         if (hdev->pdev->revision >= 0x21)
4091                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4092
4093         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4094                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4095                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4096                 vport[i].rss_tuple_sets.ipv4_udp_en =
4097                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4098                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4099                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4100                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4101                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4102                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4103                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4104                 vport[i].rss_tuple_sets.ipv6_udp_en =
4105                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4106                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4107                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4108                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4109                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4110
4111                 vport[i].rss_algo = rss_algo;
4112
4113                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4114                        HCLGE_RSS_KEY_SIZE);
4115         }
4116
4117         hclge_rss_indir_init_cfg(hdev);
4118 }
4119
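/* Map (or unmap) every ring in the chain to the given vector. The command
 * descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so the
 * chain is flushed to hardware in chunks of that size.
 */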
4120 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4121                                 int vector_id, bool en,
4122                                 struct hnae3_ring_chain_node *ring_chain)
4123 {
4124         struct hclge_dev *hdev = vport->back;
4125         struct hnae3_ring_chain_node *node;
4126         struct hclge_desc desc;
4127         struct hclge_ctrl_vector_chain_cmd *req
4128                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4129         enum hclge_cmd_status status;
4130         enum hclge_opcode_type op;
4131         u16 tqp_type_and_id;
4132         int i;
4133
4134         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4135         hclge_cmd_setup_basic_desc(&desc, op, false);
4136         req->int_vector_id = vector_id;
4137
4138         i = 0;
4139         for (node = ring_chain; node; node = node->next) {
4140                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4141                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4142                                 HCLGE_INT_TYPE_S,
4143                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4144                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4145                                 HCLGE_TQP_ID_S, node->tqp_index);
4146                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4147                                 HCLGE_INT_GL_IDX_S,
4148                                 hnae3_get_field(node->int_gl_idx,
4149                                                 HNAE3_RING_GL_IDX_M,
4150                                                 HNAE3_RING_GL_IDX_S));
4151                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4152                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4153                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4154                         req->vfid = vport->vport_id;
4155
4156                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4157                         if (status) {
4158                                 dev_err(&hdev->pdev->dev,
4159                                         "Map TQP fail, status is %d.\n",
4160                                         status);
4161                                 return -EIO;
4162                         }
4163                         i = 0;
4164
4165                         hclge_cmd_setup_basic_desc(&desc,
4166                                                    op,
4167                                                    false);
4168                         req->int_vector_id = vector_id;
4169                 }
4170         }
4171
4172         if (i > 0) {
4173                 req->int_cause_num = i;
4174                 req->vfid = vport->vport_id;
4175                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4176                 if (status) {
4177                         dev_err(&hdev->pdev->dev,
4178                                 "Map TQP fail, status is %d.\n", status);
4179                         return -EIO;
4180                 }
4181         }
4182
4183         return 0;
4184 }
4185
4186 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4187                                     int vector,
4188                                     struct hnae3_ring_chain_node *ring_chain)
4189 {
4190         struct hclge_vport *vport = hclge_get_vport(handle);
4191         struct hclge_dev *hdev = vport->back;
4192         int vector_id;
4193
4194         vector_id = hclge_get_vector_index(hdev, vector);
4195         if (vector_id < 0) {
4196                 dev_err(&hdev->pdev->dev,
4197                         "Get vector index fail, vector_id = %d\n", vector_id);
4198                 return vector_id;
4199         }
4200
4201         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4202 }
4203
4204 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4205                                        int vector,
4206                                        struct hnae3_ring_chain_node *ring_chain)
4207 {
4208         struct hclge_vport *vport = hclge_get_vport(handle);
4209         struct hclge_dev *hdev = vport->back;
4210         int vector_id, ret;
4211
4212         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4213                 return 0;
4214
4215         vector_id = hclge_get_vector_index(hdev, vector);
4216         if (vector_id < 0) {
4217                 dev_err(&handle->pdev->dev,
4218                         "Get vector index fail, ret = %d\n", vector_id);
4219                 return vector_id;
4220         }
4221
4222         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4223         if (ret)
4224                 dev_err(&handle->pdev->dev,
4225                         "Unmap ring from vector fail, vector_id = %d, ret = %d\n",
4226                         vector_id,
4227                         ret);
4228
4229         return ret;
4230 }
4231
4232 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4233                                struct hclge_promisc_param *param)
4234 {
4235         struct hclge_promisc_cfg_cmd *req;
4236         struct hclge_desc desc;
4237         int ret;
4238
4239         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4240
4241         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4242         req->vf_id = param->vf_id;
4243
4244         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4245          * on pdev revision(0x20); newer revisions support them. Setting
4246          * these two fields does not cause an error when the driver sends
4247          * the command to firmware on revision(0x20).
4248          */
4249         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4250                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4251
4252         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4253         if (ret)
4254                 dev_err(&hdev->pdev->dev,
4255                         "Set promisc mode fail, status is %d.\n", ret);
4256
4257         return ret;
4258 }
4259
4260 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4261                               bool en_mc, bool en_bc, int vport_id)
4262 {
4263         if (!param)
4264                 return;
4265
4266         memset(param, 0, sizeof(struct hclge_promisc_param));
4267         if (en_uc)
4268                 param->enable = HCLGE_PROMISC_EN_UC;
4269         if (en_mc)
4270                 param->enable |= HCLGE_PROMISC_EN_MC;
4271         if (en_bc)
4272                 param->enable |= HCLGE_PROMISC_EN_BC;
4273         param->vf_id = vport_id;
4274 }
4275
4276 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4277                                   bool en_mc_pmc)
4278 {
4279         struct hclge_vport *vport = hclge_get_vport(handle);
4280         struct hclge_dev *hdev = vport->back;
4281         struct hclge_promisc_param param;
4282         bool en_bc_pmc = true;
4283
4284         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4285          * filter is always bypassed. So broadcast promisc should be
4286          * disabled until the user enables promisc mode.
4287          */
4288         if (handle->pdev->revision == 0x20)
4289                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4290
4291         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4292                                  vport->vport_id);
4293         return hclge_cmd_set_promisc_mode(hdev, &param);
4294 }
4295
4296 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4297 {
4298         struct hclge_get_fd_mode_cmd *req;
4299         struct hclge_desc desc;
4300         int ret;
4301
4302         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4303
4304         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4305
4306         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4307         if (ret) {
4308                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4309                 return ret;
4310         }
4311
4312         *fd_mode = req->mode;
4313
4314         return ret;
4315 }
4316
4317 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4318                                    u32 *stage1_entry_num,
4319                                    u32 *stage2_entry_num,
4320                                    u16 *stage1_counter_num,
4321                                    u16 *stage2_counter_num)
4322 {
4323         struct hclge_get_fd_allocation_cmd *req;
4324         struct hclge_desc desc;
4325         int ret;
4326
4327         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4328
4329         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4330
4331         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4332         if (ret) {
4333                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4334                         ret);
4335                 return ret;
4336         }
4337
4338         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4339         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4340         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4341         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4342
4343         return ret;
4344 }
4345
4346 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4347 {
4348         struct hclge_set_fd_key_config_cmd *req;
4349         struct hclge_fd_key_cfg *stage;
4350         struct hclge_desc desc;
4351         int ret;
4352
4353         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4354
4355         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4356         stage = &hdev->fd_cfg.key_cfg[stage_num];
4357         req->stage = stage_num;
4358         req->key_select = stage->key_sel;
4359         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4360         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4361         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4362         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4363         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4364         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4365
4366         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4367         if (ret)
4368                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4369
4370         return ret;
4371 }
4372
4373 static int hclge_init_fd_config(struct hclge_dev *hdev)
4374 {
4375 #define LOW_2_WORDS             0x03
4376         struct hclge_fd_key_cfg *key_cfg;
4377         int ret;
4378
4379         if (!hnae3_dev_fd_supported(hdev))
4380                 return 0;
4381
4382         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4383         if (ret)
4384                 return ret;
4385
4386         switch (hdev->fd_cfg.fd_mode) {
4387         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4388                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4389                 break;
4390         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4391                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4392                 break;
4393         default:
4394                 dev_err(&hdev->pdev->dev,
4395                         "Unsupported flow director mode %d\n",
4396                         hdev->fd_cfg.fd_mode);
4397                 return -EOPNOTSUPP;
4398         }
4399
4400         hdev->fd_cfg.proto_support =
4401                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4402                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4403         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4404         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4405         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4406         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4407         key_cfg->outer_sipv6_word_en = 0;
4408         key_cfg->outer_dipv6_word_en = 0;
4409
4410         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4411                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4412                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4413                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4414
4415         /* If the max 400-bit key is used, ether type tuples can be supported */
4416         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4417                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4418                 key_cfg->tuple_active |=
4419                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4420         }
4421
4422         /* roce_type is used to filter roce frames
4423          * dst_vport is used to specify the vport a rule applies to
4424          */
4425         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4426
4427         ret = hclge_get_fd_allocation(hdev,
4428                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4429                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4430                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4431                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4432         if (ret)
4433                 return ret;
4434
4435         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4436 }
4437
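/* Write one flow director TCAM entry at index @loc of the given @stage.
 * @sel_x selects whether the x or the y half of the key is written, @key
 * (when not NULL) supplies the key bytes split across three chained
 * descriptors, and @is_add controls whether the entry is marked valid.
 */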
4438 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4439                                 int loc, u8 *key, bool is_add)
4440 {
4441         struct hclge_fd_tcam_config_1_cmd *req1;
4442         struct hclge_fd_tcam_config_2_cmd *req2;
4443         struct hclge_fd_tcam_config_3_cmd *req3;
4444         struct hclge_desc desc[3];
4445         int ret;
4446
4447         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4448         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4449         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4450         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4451         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4452
4453         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4454         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4455         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4456
4457         req1->stage = stage;
4458         req1->xy_sel = sel_x ? 1 : 0;
4459         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4460         req1->index = cpu_to_le32(loc);
4461         req1->entry_vld = sel_x ? is_add : 0;
4462
4463         if (key) {
4464                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4465                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4466                        sizeof(req2->tcam_data));
4467                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4468                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4469         }
4470
4471         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4472         if (ret)
4473                 dev_err(&hdev->pdev->dev,
4474                         "config tcam key fail, ret=%d\n",
4475                         ret);
4476
4477         return ret;
4478 }
4479
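/* Configure the action data for the flow director entry at index @loc:
 * drop or forward to a direct queue, optional counter, next stage key and
 * rule id write-back, packed into a 64-bit field and sent to the firmware
 * with the HCLGE_OPC_FD_AD_OP command.
 */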
4480 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4481                               struct hclge_fd_ad_data *action)
4482 {
4483         struct hclge_fd_ad_config_cmd *req;
4484         struct hclge_desc desc;
4485         u64 ad_data = 0;
4486         int ret;
4487
4488         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4489
4490         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4491         req->index = cpu_to_le32(loc);
4492         req->stage = stage;
4493
4494         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4495                       action->write_rule_id_to_bd);
4496         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4497                         action->rule_id);
4498         ad_data <<= 32;
4499         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4500         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4501                       action->forward_to_direct_queue);
4502         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4503                         action->queue_id);
4504         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4505         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4506                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4507         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4508         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4509                         action->counter_id);
4510
4511         req->ad_data = cpu_to_le64(ad_data);
4512         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4513         if (ret)
4514                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4515
4516         return ret;
4517 }
4518
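/* Convert one tuple of @rule, selected by @tuple_bit, into its TCAM x/y key
 * form at @key_x/@key_y based on the tuple value and mask. Returns true if
 * the tuple occupies key space (so the caller advances the key cursor),
 * false if the tuple is not part of the key layout.
 */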
4519 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4520                                    struct hclge_fd_rule *rule)
4521 {
4522         u16 tmp_x_s, tmp_y_s;
4523         u32 tmp_x_l, tmp_y_l;
4524         int i;
4525
4526         if (rule->unused_tuple & tuple_bit)
4527                 return true;
4528
4529         switch (tuple_bit) {
4530         case 0:
4531                 return false;
4532         case BIT(INNER_DST_MAC):
4533                 for (i = 0; i < 6; i++) {
4534                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4535                                rule->tuples_mask.dst_mac[i]);
4536                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4537                                rule->tuples_mask.dst_mac[i]);
4538                 }
4539
4540                 return true;
4541         case BIT(INNER_SRC_MAC):
4542                 for (i = 0; i < 6; i++) {
4543                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4544                                rule->tuples_mask.src_mac[i]);
4545                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4546                                rule->tuples_mask.src_mac[i]);
4547                 }
4548
4549                 return true;
4550         case BIT(INNER_VLAN_TAG_FST):
4551                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4552                        rule->tuples_mask.vlan_tag1);
4553                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4554                        rule->tuples_mask.vlan_tag1);
4555                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4556                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4557
4558                 return true;
4559         case BIT(INNER_ETH_TYPE):
4560                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4561                        rule->tuples_mask.ether_proto);
4562                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4563                        rule->tuples_mask.ether_proto);
4564                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4565                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4566
4567                 return true;
4568         case BIT(INNER_IP_TOS):
4569                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4570                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4571
4572                 return true;
4573         case BIT(INNER_IP_PROTO):
4574                 calc_x(*key_x, rule->tuples.ip_proto,
4575                        rule->tuples_mask.ip_proto);
4576                 calc_y(*key_y, rule->tuples.ip_proto,
4577                        rule->tuples_mask.ip_proto);
4578
4579                 return true;
4580         case BIT(INNER_SRC_IP):
4581                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4582                        rule->tuples_mask.src_ip[3]);
4583                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4584                        rule->tuples_mask.src_ip[3]);
4585                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4586                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4587
4588                 return true;
4589         case BIT(INNER_DST_IP):
4590                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4591                        rule->tuples_mask.dst_ip[3]);
4592                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4593                        rule->tuples_mask.dst_ip[3]);
4594                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4595                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4596
4597                 return true;
4598         case BIT(INNER_SRC_PORT):
4599                 calc_x(tmp_x_s, rule->tuples.src_port,
4600                        rule->tuples_mask.src_port);
4601                 calc_y(tmp_y_s, rule->tuples.src_port,
4602                        rule->tuples_mask.src_port);
4603                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4604                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4605
4606                 return true;
4607         case BIT(INNER_DST_PORT):
4608                 calc_x(tmp_x_s, rule->tuples.dst_port,
4609                        rule->tuples_mask.dst_port);
4610                 calc_y(tmp_y_s, rule->tuples.dst_port,
4611                        rule->tuples_mask.dst_port);
4612                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4613                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4614
4615                 return true;
4616         default:
4617                 return false;
4618         }
4619 }
4620
4621 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4622                                  u8 vf_id, u8 network_port_id)
4623 {
4624         u32 port_number = 0;
4625
4626         if (port_type == HOST_PORT) {
4627                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4628                                 pf_id);
4629                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4630                                 vf_id);
4631                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4632         } else {
4633                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4634                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4635                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4636         }
4637
4638         return port_number;
4639 }
4640
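/* Build the meta data portion of the key from @key_cfg->meta_data_active:
 * the ROCE_TYPE field is set to NIC_PACKET and the DST_VPORT field carries
 * the host port number derived from @rule->vf_id. The result is converted
 * to x/y form and left-aligned within the 32-bit meta data region.
 */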
4641 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4642                                        __le32 *key_x, __le32 *key_y,
4643                                        struct hclge_fd_rule *rule)
4644 {
4645         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4646         u8 cur_pos = 0, tuple_size, shift_bits;
4647         int i;
4648
4649         for (i = 0; i < MAX_META_DATA; i++) {
4650                 tuple_size = meta_data_key_info[i].key_length;
4651                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4652
4653                 switch (tuple_bit) {
4654                 case BIT(ROCE_TYPE):
4655                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4656                         cur_pos += tuple_size;
4657                         break;
4658                 case BIT(DST_VPORT):
4659                         port_number = hclge_get_port_number(HOST_PORT, 0,
4660                                                             rule->vf_id, 0);
4661                         hnae3_set_field(meta_data,
4662                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4663                                         cur_pos, port_number);
4664                         cur_pos += tuple_size;
4665                         break;
4666                 default:
4667                         break;
4668                 }
4669         }
4670
4671         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4672         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4673         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4674
4675         *key_x = cpu_to_le32(tmp_x << shift_bits);
4676         *key_y = cpu_to_le32(tmp_y << shift_bits);
4677 }
4678
4679 /* A complete key consists of a meta data key and a tuple key.
4680  * The meta data key is stored in the MSB region, the tuple key is stored
4681  * in the LSB region, and unused bits are filled with 0.
4682  */
4683 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4684                             struct hclge_fd_rule *rule)
4685 {
4686         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4687         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4688         u8 *cur_key_x, *cur_key_y;
4689         int i, ret, tuple_size;
4690         u8 meta_data_region;
4691
4692         memset(key_x, 0, sizeof(key_x));
4693         memset(key_y, 0, sizeof(key_y));
4694         cur_key_x = key_x;
4695         cur_key_y = key_y;
4696
4697         for (i = 0; i < MAX_TUPLE; i++) {
4698                 bool tuple_valid;
4699                 u32 check_tuple;
4700
4701                 tuple_size = tuple_key_info[i].key_length / 8;
4702                 check_tuple = key_cfg->tuple_active & BIT(i);
4703
4704                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4705                                                      cur_key_y, rule);
4706                 if (tuple_valid) {
4707                         cur_key_x += tuple_size;
4708                         cur_key_y += tuple_size;
4709                 }
4710         }
4711
4712         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4713                         MAX_META_DATA_LENGTH / 8;
4714
4715         hclge_fd_convert_meta_data(key_cfg,
4716                                    (__le32 *)(key_x + meta_data_region),
4717                                    (__le32 *)(key_y + meta_data_region),
4718                                    rule);
4719
4720         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4721                                    true);
4722         if (ret) {
4723                 dev_err(&hdev->pdev->dev,
4724                         "fd key_y config fail, loc=%d, ret=%d\n",
4725                         rule->location, ret);
4726                 return ret;
4727         }
4728
4729         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4730                                    true);
4731         if (ret)
4732                 dev_err(&hdev->pdev->dev,
4733                         "fd key_x config fail, loc=%d, ret=%d\n",
4734                         rule->location, ret);
4735         return ret;
4736 }
4737
4738 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4739                                struct hclge_fd_rule *rule)
4740 {
4741         struct hclge_fd_ad_data ad_data;
4742
4743         ad_data.ad_id = rule->location;
4744
4745         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4746                 ad_data.drop_packet = true;
4747                 ad_data.forward_to_direct_queue = false;
4748                 ad_data.queue_id = 0;
4749         } else {
4750                 ad_data.drop_packet = false;
4751                 ad_data.forward_to_direct_queue = true;
4752                 ad_data.queue_id = rule->queue_id;
4753         }
4754
4755         ad_data.use_counter = false;
4756         ad_data.counter_id = 0;
4757
4758         ad_data.use_next_stage = false;
4759         ad_data.next_input_key = 0;
4760
4761         ad_data.write_rule_id_to_bd = true;
4762         ad_data.rule_id = rule->location;
4763
4764         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4765 }
4766
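/* Validate an ethtool flow spec against the flow director capabilities
 * (location range, supported flow types, no user-def bytes) and record in
 * @unused which tuples are left unspecified so that they can be masked out
 * of the key. Returns 0 on success or a negative error code.
 */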
4767 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4768                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4769 {
4770         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4771         struct ethtool_usrip4_spec *usr_ip4_spec;
4772         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4773         struct ethtool_usrip6_spec *usr_ip6_spec;
4774         struct ethhdr *ether_spec;
4775
4776         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4777                 return -EINVAL;
4778
4779         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4780                 return -EOPNOTSUPP;
4781
4782         if ((fs->flow_type & FLOW_EXT) &&
4783             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4784                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4785                 return -EOPNOTSUPP;
4786         }
4787
4788         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4789         case SCTP_V4_FLOW:
4790         case TCP_V4_FLOW:
4791         case UDP_V4_FLOW:
4792                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4793                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4794
4795                 if (!tcp_ip4_spec->ip4src)
4796                         *unused |= BIT(INNER_SRC_IP);
4797
4798                 if (!tcp_ip4_spec->ip4dst)
4799                         *unused |= BIT(INNER_DST_IP);
4800
4801                 if (!tcp_ip4_spec->psrc)
4802                         *unused |= BIT(INNER_SRC_PORT);
4803
4804                 if (!tcp_ip4_spec->pdst)
4805                         *unused |= BIT(INNER_DST_PORT);
4806
4807                 if (!tcp_ip4_spec->tos)
4808                         *unused |= BIT(INNER_IP_TOS);
4809
4810                 break;
4811         case IP_USER_FLOW:
4812                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4813                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4814                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4815
4816                 if (!usr_ip4_spec->ip4src)
4817                         *unused |= BIT(INNER_SRC_IP);
4818
4819                 if (!usr_ip4_spec->ip4dst)
4820                         *unused |= BIT(INNER_DST_IP);
4821
4822                 if (!usr_ip4_spec->tos)
4823                         *unused |= BIT(INNER_IP_TOS);
4824
4825                 if (!usr_ip4_spec->proto)
4826                         *unused |= BIT(INNER_IP_PROTO);
4827
4828                 if (usr_ip4_spec->l4_4_bytes)
4829                         return -EOPNOTSUPP;
4830
4831                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4832                         return -EOPNOTSUPP;
4833
4834                 break;
4835         case SCTP_V6_FLOW:
4836         case TCP_V6_FLOW:
4837         case UDP_V6_FLOW:
4838                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4839                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4840                         BIT(INNER_IP_TOS);
4841
4842                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4843                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4844                         *unused |= BIT(INNER_SRC_IP);
4845
4846                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4847                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4848                         *unused |= BIT(INNER_DST_IP);
4849
4850                 if (!tcp_ip6_spec->psrc)
4851                         *unused |= BIT(INNER_SRC_PORT);
4852
4853                 if (!tcp_ip6_spec->pdst)
4854                         *unused |= BIT(INNER_DST_PORT);
4855
4856                 if (tcp_ip6_spec->tclass)
4857                         return -EOPNOTSUPP;
4858
4859                 break;
4860         case IPV6_USER_FLOW:
4861                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4862                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4863                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4864                         BIT(INNER_DST_PORT);
4865
4866                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4867                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4868                         *unused |= BIT(INNER_SRC_IP);
4869
4870                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4871                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4872                         *unused |= BIT(INNER_DST_IP);
4873
4874                 if (!usr_ip6_spec->l4_proto)
4875                         *unused |= BIT(INNER_IP_PROTO);
4876
4877                 if (usr_ip6_spec->tclass)
4878                         return -EOPNOTSUPP;
4879
4880                 if (usr_ip6_spec->l4_4_bytes)
4881                         return -EOPNOTSUPP;
4882
4883                 break;
4884         case ETHER_FLOW:
4885                 ether_spec = &fs->h_u.ether_spec;
4886                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4888                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4889
4890                 if (is_zero_ether_addr(ether_spec->h_source))
4891                         *unused |= BIT(INNER_SRC_MAC);
4892
4893                 if (is_zero_ether_addr(ether_spec->h_dest))
4894                         *unused |= BIT(INNER_DST_MAC);
4895
4896                 if (!ether_spec->h_proto)
4897                         *unused |= BIT(INNER_ETH_TYPE);
4898
4899                 break;
4900         default:
4901                 return -EOPNOTSUPP;
4902         }
4903
4904         if ((fs->flow_type & FLOW_EXT)) {
4905                 if (fs->h_ext.vlan_etype)
4906                         return -EOPNOTSUPP;
4907                 if (!fs->h_ext.vlan_tci)
4908                         *unused |= BIT(INNER_VLAN_TAG_FST);
4909
4910                 if (fs->m_ext.vlan_tci) {
4911                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4912                                 return -EINVAL;
4913                 }
4914         } else {
4915                 *unused |= BIT(INNER_VLAN_TAG_FST);
4916         }
4917
4918         if (fs->flow_type & FLOW_MAC_EXT) {
4919                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4920                         return -EOPNOTSUPP;
4921
4922                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4923                         *unused |= BIT(INNER_DST_MAC);
4924                 else
4925                         *unused &= ~(BIT(INNER_DST_MAC));
4926         }
4927
4928         return 0;
4929 }
4930
4931 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4932 {
4933         struct hclge_fd_rule *rule = NULL;
4934         struct hlist_node *node2;
4935
4936         spin_lock_bh(&hdev->fd_rule_lock);
4937         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4938                 if (rule->location >= location)
4939                         break;
4940         }
4941
4942         spin_unlock_bh(&hdev->fd_rule_lock);
4943
4944         return rule && rule->location == location;
4945 }
4946
4947 /* make sure this is called with hdev->fd_rule_lock held */
4948 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4949                                      struct hclge_fd_rule *new_rule,
4950                                      u16 location,
4951                                      bool is_add)
4952 {
4953         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4954         struct hlist_node *node2;
4955
4956         if (is_add && !new_rule)
4957                 return -EINVAL;
4958
4959         hlist_for_each_entry_safe(rule, node2,
4960                                   &hdev->fd_rule_list, rule_node) {
4961                 if (rule->location >= location)
4962                         break;
4963                 parent = rule;
4964         }
4965
4966         if (rule && rule->location == location) {
4967                 hlist_del(&rule->rule_node);
4968                 kfree(rule);
4969                 hdev->hclge_fd_rule_num--;
4970
4971                 if (!is_add) {
4972                         if (!hdev->hclge_fd_rule_num)
4973                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4974                         clear_bit(location, hdev->fd_bmap);
4975
4976                         return 0;
4977                 }
4978         } else if (!is_add) {
4979                 dev_err(&hdev->pdev->dev,
4980                         "delete fail, rule %d does not exist\n",
4981                         location);
4982                 return -EINVAL;
4983         }
4984
4985         INIT_HLIST_NODE(&new_rule->rule_node);
4986
4987         if (parent)
4988                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4989         else
4990                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4991
4992         set_bit(location, hdev->fd_bmap);
4993         hdev->hclge_fd_rule_num++;
4994         hdev->fd_active_type = new_rule->rule_type;
4995
4996         return 0;
4997 }
4998
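/* Copy the match fields and masks from the ethtool flow spec into
 * @rule->tuples and @rule->tuples_mask, converting from big endian to host
 * order and filling in the ether_proto/ip_proto implied by the flow type.
 */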
4999 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5000                               struct ethtool_rx_flow_spec *fs,
5001                               struct hclge_fd_rule *rule)
5002 {
5003         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5004
5005         switch (flow_type) {
5006         case SCTP_V4_FLOW:
5007         case TCP_V4_FLOW:
5008         case UDP_V4_FLOW:
5009                 rule->tuples.src_ip[3] =
5010                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5011                 rule->tuples_mask.src_ip[3] =
5012                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5013
5014                 rule->tuples.dst_ip[3] =
5015                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5016                 rule->tuples_mask.dst_ip[3] =
5017                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5018
5019                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5020                 rule->tuples_mask.src_port =
5021                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5022
5023                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5024                 rule->tuples_mask.dst_port =
5025                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5026
5027                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5028                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5029
5030                 rule->tuples.ether_proto = ETH_P_IP;
5031                 rule->tuples_mask.ether_proto = 0xFFFF;
5032
5033                 break;
5034         case IP_USER_FLOW:
5035                 rule->tuples.src_ip[3] =
5036                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5037                 rule->tuples_mask.src_ip[3] =
5038                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5039
5040                 rule->tuples.dst_ip[3] =
5041                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5042                 rule->tuples_mask.dst_ip[3] =
5043                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5044
5045                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5046                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5047
5048                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5049                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5050
5051                 rule->tuples.ether_proto = ETH_P_IP;
5052                 rule->tuples_mask.ether_proto = 0xFFFF;
5053
5054                 break;
5055         case SCTP_V6_FLOW:
5056         case TCP_V6_FLOW:
5057         case UDP_V6_FLOW:
5058                 be32_to_cpu_array(rule->tuples.src_ip,
5059                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5060                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5061                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5062
5063                 be32_to_cpu_array(rule->tuples.dst_ip,
5064                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5065                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5066                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5067
5068                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5069                 rule->tuples_mask.src_port =
5070                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5071
5072                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5073                 rule->tuples_mask.dst_port =
5074                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5075
5076                 rule->tuples.ether_proto = ETH_P_IPV6;
5077                 rule->tuples_mask.ether_proto = 0xFFFF;
5078
5079                 break;
5080         case IPV6_USER_FLOW:
5081                 be32_to_cpu_array(rule->tuples.src_ip,
5082                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5083                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5084                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5085
5086                 be32_to_cpu_array(rule->tuples.dst_ip,
5087                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5088                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5089                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5090
5091                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5092                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5093
5094                 rule->tuples.ether_proto = ETH_P_IPV6;
5095                 rule->tuples_mask.ether_proto = 0xFFFF;
5096
5097                 break;
5098         case ETHER_FLOW:
5099                 ether_addr_copy(rule->tuples.src_mac,
5100                                 fs->h_u.ether_spec.h_source);
5101                 ether_addr_copy(rule->tuples_mask.src_mac,
5102                                 fs->m_u.ether_spec.h_source);
5103
5104                 ether_addr_copy(rule->tuples.dst_mac,
5105                                 fs->h_u.ether_spec.h_dest);
5106                 ether_addr_copy(rule->tuples_mask.dst_mac,
5107                                 fs->m_u.ether_spec.h_dest);
5108
5109                 rule->tuples.ether_proto =
5110                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5111                 rule->tuples_mask.ether_proto =
5112                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5113
5114                 break;
5115         default:
5116                 return -EOPNOTSUPP;
5117         }
5118
5119         switch (flow_type) {
5120         case SCTP_V4_FLOW:
5121         case SCTP_V6_FLOW:
5122                 rule->tuples.ip_proto = IPPROTO_SCTP;
5123                 rule->tuples_mask.ip_proto = 0xFF;
5124                 break;
5125         case TCP_V4_FLOW:
5126         case TCP_V6_FLOW:
5127                 rule->tuples.ip_proto = IPPROTO_TCP;
5128                 rule->tuples_mask.ip_proto = 0xFF;
5129                 break;
5130         case UDP_V4_FLOW:
5131         case UDP_V6_FLOW:
5132                 rule->tuples.ip_proto = IPPROTO_UDP;
5133                 rule->tuples_mask.ip_proto = 0xFF;
5134                 break;
5135         default:
5136                 break;
5137         }
5138
5139         if ((fs->flow_type & FLOW_EXT)) {
5140                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5141                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5142         }
5143
5144         if (fs->flow_type & FLOW_MAC_EXT) {
5145                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5146                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5147         }
5148
5149         return 0;
5150 }
5151
5152 /* make sure this is called with hdev->fd_rule_lock held */
5153 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5154                                 struct hclge_fd_rule *rule)
5155 {
5156         int ret;
5157
5158         if (!rule) {
5159                 dev_err(&hdev->pdev->dev,
5160                         "The flow director rule is NULL\n");
5161                 return -EINVAL;
5162         }
5163
5164         /* it will never fail here, so no need to check the return value */
5165         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5166
5167         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5168         if (ret)
5169                 goto clear_rule;
5170
5171         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5172         if (ret)
5173                 goto clear_rule;
5174
5175         return 0;
5176
5177 clear_rule:
5178         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5179         return ret;
5180 }
5181
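/* Handle an ethtool flow director rule insertion: check that the flow
 * director is supported and enabled, validate the spec, resolve the action
 * (drop or forward to a PF/VF queue), build the rule, clear any aRFS rules
 * to avoid conflicts and program the rule into hardware under fd_rule_lock.
 */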
5182 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5183                               struct ethtool_rxnfc *cmd)
5184 {
5185         struct hclge_vport *vport = hclge_get_vport(handle);
5186         struct hclge_dev *hdev = vport->back;
5187         u16 dst_vport_id = 0, q_index = 0;
5188         struct ethtool_rx_flow_spec *fs;
5189         struct hclge_fd_rule *rule;
5190         u32 unused = 0;
5191         u8 action;
5192         int ret;
5193
5194         if (!hnae3_dev_fd_supported(hdev))
5195                 return -EOPNOTSUPP;
5196
5197         if (!hdev->fd_en) {
5198                 dev_warn(&hdev->pdev->dev,
5199                          "Please enable flow director first\n");
5200                 return -EOPNOTSUPP;
5201         }
5202
5203         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5204
5205         ret = hclge_fd_check_spec(hdev, fs, &unused);
5206         if (ret) {
5207                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5208                 return ret;
5209         }
5210
5211         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5212                 action = HCLGE_FD_ACTION_DROP_PACKET;
5213         } else {
5214                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5215                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5216                 u16 tqps;
5217
5218                 if (vf > hdev->num_req_vfs) {
5219                         dev_err(&hdev->pdev->dev,
5220                                 "Error: vf id (%d) > max vf num (%d)\n",
5221                                 vf, hdev->num_req_vfs);
5222                         return -EINVAL;
5223                 }
5224
5225                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5226                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5227
5228                 if (ring >= tqps) {
5229                         dev_err(&hdev->pdev->dev,
5230                                 "Error: queue id (%d) > max tqp num (%d)\n",
5231                                 ring, tqps - 1);
5232                         return -EINVAL;
5233                 }
5234
5235                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5236                 q_index = ring;
5237         }
5238
5239         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5240         if (!rule)
5241                 return -ENOMEM;
5242
5243         ret = hclge_fd_get_tuple(hdev, fs, rule);
5244         if (ret) {
5245                 kfree(rule);
5246                 return ret;
5247         }
5248
5249         rule->flow_type = fs->flow_type;
5250
5251         rule->location = fs->location;
5252         rule->unused_tuple = unused;
5253         rule->vf_id = dst_vport_id;
5254         rule->queue_id = q_index;
5255         rule->action = action;
5256         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5257
5258         /* to avoid rule conflict, when the user configures a rule via
5259          * ethtool, we need to clear all arfs rules
5260          */
5261         hclge_clear_arfs_rules(handle);
5262
5263         spin_lock_bh(&hdev->fd_rule_lock);
5264         ret = hclge_fd_config_rule(hdev, rule);
5265
5266         spin_unlock_bh(&hdev->fd_rule_lock);
5267
5268         return ret;
5269 }
5270
5271 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5272                               struct ethtool_rxnfc *cmd)
5273 {
5274         struct hclge_vport *vport = hclge_get_vport(handle);
5275         struct hclge_dev *hdev = vport->back;
5276         struct ethtool_rx_flow_spec *fs;
5277         int ret;
5278
5279         if (!hnae3_dev_fd_supported(hdev))
5280                 return -EOPNOTSUPP;
5281
5282         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5283
5284         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5285                 return -EINVAL;
5286
5287         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5288                 dev_err(&hdev->pdev->dev,
5289                         "Delete fail, rule %d does not exist\n",
5290                         fs->location);
5291                 return -ENOENT;
5292         }
5293
5294         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5295                                    fs->location, NULL, false);
5296         if (ret)
5297                 return ret;
5298
5299         spin_lock_bh(&hdev->fd_rule_lock);
5300         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5301
5302         spin_unlock_bh(&hdev->fd_rule_lock);
5303
5304         return ret;
5305 }
5306
5307 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5308                                      bool clear_list)
5309 {
5310         struct hclge_vport *vport = hclge_get_vport(handle);
5311         struct hclge_dev *hdev = vport->back;
5312         struct hclge_fd_rule *rule;
5313         struct hlist_node *node;
5314         u16 location;
5315
5316         if (!hnae3_dev_fd_supported(hdev))
5317                 return;
5318
5319         spin_lock_bh(&hdev->fd_rule_lock);
5320         for_each_set_bit(location, hdev->fd_bmap,
5321                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5322                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5323                                      NULL, false);
5324
5325         if (clear_list) {
5326                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5327                                           rule_node) {
5328                         hlist_del(&rule->rule_node);
5329                         kfree(rule);
5330                 }
5331                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5332                 hdev->hclge_fd_rule_num = 0;
5333                 bitmap_zero(hdev->fd_bmap,
5334                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5335         }
5336
5337         spin_unlock_bh(&hdev->fd_rule_lock);
5338 }
5339
5340 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5341 {
5342         struct hclge_vport *vport = hclge_get_vport(handle);
5343         struct hclge_dev *hdev = vport->back;
5344         struct hclge_fd_rule *rule;
5345         struct hlist_node *node;
5346         int ret;
5347
5348         /* Return ok here, because reset error handling will check this
5349          * return value. If error is returned here, the reset process will
5350          * return value. If an error is returned here, the reset process will
5351          */
5352         if (!hnae3_dev_fd_supported(hdev))
5353                 return 0;
5354
5355         /* if fd is disabled, the rules should not be restored during reset */
5356         if (!hdev->fd_en)
5357                 return 0;
5358
5359         spin_lock_bh(&hdev->fd_rule_lock);
5360         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5361                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5362                 if (!ret)
5363                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5364
5365                 if (ret) {
5366                         dev_warn(&hdev->pdev->dev,
5367                                  "Restore rule %d failed, remove it\n",
5368                                  rule->location);
5369                         clear_bit(rule->location, hdev->fd_bmap);
5370                         hlist_del(&rule->rule_node);
5371                         kfree(rule);
5372                         hdev->hclge_fd_rule_num--;
5373                 }
5374         }
5375
5376         if (hdev->hclge_fd_rule_num)
5377                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5378
5379         spin_unlock_bh(&hdev->fd_rule_lock);
5380
5381         return 0;
5382 }
5383
5384 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5385                                  struct ethtool_rxnfc *cmd)
5386 {
5387         struct hclge_vport *vport = hclge_get_vport(handle);
5388         struct hclge_dev *hdev = vport->back;
5389
5390         if (!hnae3_dev_fd_supported(hdev))
5391                 return -EOPNOTSUPP;
5392
5393         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5394         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5395
5396         return 0;
5397 }
5398
5399 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5400                                   struct ethtool_rxnfc *cmd)
5401 {
5402         struct hclge_vport *vport = hclge_get_vport(handle);
5403         struct hclge_fd_rule *rule = NULL;
5404         struct hclge_dev *hdev = vport->back;
5405         struct ethtool_rx_flow_spec *fs;
5406         struct hlist_node *node2;
5407
5408         if (!hnae3_dev_fd_supported(hdev))
5409                 return -EOPNOTSUPP;
5410
5411         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5412
5413         spin_lock_bh(&hdev->fd_rule_lock);
5414
5415         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5416                 if (rule->location >= fs->location)
5417                         break;
5418         }
5419
5420         if (!rule || fs->location != rule->location) {
5421                 spin_unlock_bh(&hdev->fd_rule_lock);
5422
5423                 return -ENOENT;
5424         }
5425
5426         fs->flow_type = rule->flow_type;
5427         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5428         case SCTP_V4_FLOW:
5429         case TCP_V4_FLOW:
5430         case UDP_V4_FLOW:
5431                 fs->h_u.tcp_ip4_spec.ip4src =
5432                                 cpu_to_be32(rule->tuples.src_ip[3]);
5433                 fs->m_u.tcp_ip4_spec.ip4src =
5434                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5435                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5436
5437                 fs->h_u.tcp_ip4_spec.ip4dst =
5438                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5439                 fs->m_u.tcp_ip4_spec.ip4dst =
5440                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5441                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5442
5443                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5444                 fs->m_u.tcp_ip4_spec.psrc =
5445                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5446                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5447
5448                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5449                 fs->m_u.tcp_ip4_spec.pdst =
5450                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5451                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5452
5453                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5454                 fs->m_u.tcp_ip4_spec.tos =
5455                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5456                                 0 : rule->tuples_mask.ip_tos;
5457
5458                 break;
5459         case IP_USER_FLOW:
5460                 fs->h_u.usr_ip4_spec.ip4src =
5461                                 cpu_to_be32(rule->tuples.src_ip[3]);
5462                 fs->m_u.tcp_ip4_spec.ip4src =
5463                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5464                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5465
5466                 fs->h_u.usr_ip4_spec.ip4dst =
5467                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5468                 fs->m_u.usr_ip4_spec.ip4dst =
5469                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5470                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5471
5472                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5473                 fs->m_u.usr_ip4_spec.tos =
5474                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5475                                 0 : rule->tuples_mask.ip_tos;
5476
5477                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5478                 fs->m_u.usr_ip4_spec.proto =
5479                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5480                                 0 : rule->tuples_mask.ip_proto;
5481
5482                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5483
5484                 break;
5485         case SCTP_V6_FLOW:
5486         case TCP_V6_FLOW:
5487         case UDP_V6_FLOW:
5488                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5489                                   rule->tuples.src_ip, 4);
5490                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5491                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5492                 else
5493                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5494                                           rule->tuples_mask.src_ip, 4);
5495
5496                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5497                                   rule->tuples.dst_ip, 4);
5498                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5499                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5500                 else
5501                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5502                                           rule->tuples_mask.dst_ip, 4);
5503
5504                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5505                 fs->m_u.tcp_ip6_spec.psrc =
5506                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5507                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5508
5509                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5510                 fs->m_u.tcp_ip6_spec.pdst =
5511                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5512                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5513
5514                 break;
5515         case IPV6_USER_FLOW:
5516                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5517                                   rule->tuples.src_ip, 4);
5518                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5519                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5520                 else
5521                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5522                                           rule->tuples_mask.src_ip, 4);
5523
5524                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5525                                   rule->tuples.dst_ip, 4);
5526                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5527                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5528                 else
5529                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5530                                           rule->tuples_mask.dst_ip, 4);
5531
5532                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5533                 fs->m_u.usr_ip6_spec.l4_proto =
5534                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5535                                 0 : rule->tuples_mask.ip_proto;
5536
5537                 break;
5538         case ETHER_FLOW:
5539                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5540                                 rule->tuples.src_mac);
5541                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5542                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5543                 else
5544                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5545                                         rule->tuples_mask.src_mac);
5546
5547                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5548                                 rule->tuples.dst_mac);
5549                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5550                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5551                 else
5552                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5553                                         rule->tuples_mask.dst_mac);
5554
5555                 fs->h_u.ether_spec.h_proto =
5556                                 cpu_to_be16(rule->tuples.ether_proto);
5557                 fs->m_u.ether_spec.h_proto =
5558                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5559                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5560
5561                 break;
5562         default:
5563                 spin_unlock_bh(&hdev->fd_rule_lock);
5564                 return -EOPNOTSUPP;
5565         }
5566
5567         if (fs->flow_type & FLOW_EXT) {
5568                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5569                 fs->m_ext.vlan_tci =
5570                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5571                                 cpu_to_be16(VLAN_VID_MASK) :
5572                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5573         }
5574
5575         if (fs->flow_type & FLOW_MAC_EXT) {
5576                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5577                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5578                         eth_zero_addr(fs->m_ext.h_dest);
5579                 else
5580                         ether_addr_copy(fs->m_ext.h_dest,
5581                                         rule->tuples_mask.dst_mac);
5582         }
5583
5584         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5585                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5586         } else {
5587                 u64 vf_id;
5588
5589                 fs->ring_cookie = rule->queue_id;
5590                 vf_id = rule->vf_id;
5591                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5592                 fs->ring_cookie |= vf_id;
5593         }
5594
5595         spin_unlock_bh(&hdev->fd_rule_lock);
5596
5597         return 0;
5598 }
5599
5600 static int hclge_get_all_rules(struct hnae3_handle *handle,
5601                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5602 {
5603         struct hclge_vport *vport = hclge_get_vport(handle);
5604         struct hclge_dev *hdev = vport->back;
5605         struct hclge_fd_rule *rule;
5606         struct hlist_node *node2;
5607         int cnt = 0;
5608
5609         if (!hnae3_dev_fd_supported(hdev))
5610                 return -EOPNOTSUPP;
5611
5612         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5613
5614         spin_lock_bh(&hdev->fd_rule_lock);
5615         hlist_for_each_entry_safe(rule, node2,
5616                                   &hdev->fd_rule_list, rule_node) {
5617                 if (cnt == cmd->rule_cnt) {
5618                         spin_unlock_bh(&hdev->fd_rule_lock);
5619                         return -EMSGSIZE;
5620                 }
5621
5622                 rule_locs[cnt] = rule->location;
5623                 cnt++;
5624         }
5625
5626         spin_unlock_bh(&hdev->fd_rule_lock);
5627
5628         cmd->rule_cnt = cnt;
5629
5630         return 0;
5631 }
5632
5633 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5634                                      struct hclge_fd_rule_tuples *tuples)
5635 {
5636         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5637         tuples->ip_proto = fkeys->basic.ip_proto;
5638         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5639
5640         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5641                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5642                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5643         } else {
5644                 memcpy(tuples->src_ip,
5645                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5646                        sizeof(tuples->src_ip));
5647                 memcpy(tuples->dst_ip,
5648                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5649                        sizeof(tuples->dst_ip));
5650         }
5651 }
5652
5653 /* traverse all rules, check whether an existing rule has the same tuples */
5654 static struct hclge_fd_rule *
5655 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5656                           const struct hclge_fd_rule_tuples *tuples)
5657 {
5658         struct hclge_fd_rule *rule = NULL;
5659         struct hlist_node *node;
5660
5661         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5662                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5663                         return rule;
5664         }
5665
5666         return NULL;
5667 }
5668
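/* Build a flow director rule from aRFS flow keys. Such rules match only the
 * source/destination IP, destination port, IP protocol and ethertype; the MAC
 * addresses, first VLAN tag, IP TOS and source port are marked as unused so
 * they are ignored by the hardware match, while the used tuples are matched
 * exactly (mask set to all ones).
 */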
5669 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5670                                      struct hclge_fd_rule *rule)
5671 {
5672         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5673                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5674                              BIT(INNER_SRC_PORT);
5675         rule->action = 0;
5676         rule->vf_id = 0;
5677         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5678         if (tuples->ether_proto == ETH_P_IP) {
5679                 if (tuples->ip_proto == IPPROTO_TCP)
5680                         rule->flow_type = TCP_V4_FLOW;
5681                 else
5682                         rule->flow_type = UDP_V4_FLOW;
5683         } else {
5684                 if (tuples->ip_proto == IPPROTO_TCP)
5685                         rule->flow_type = TCP_V6_FLOW;
5686                 else
5687                         rule->flow_type = UDP_V6_FLOW;
5688         }
5689         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5690         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5691 }
5692
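/* aRFS flow steering handler: build a flow director rule from the dissected
 * flow keys and steer the matching flow to @queue_id. Returns the rule
 * location on success; hclge_rfs_filter_expire() later uses that location as
 * the filter id when asking the stack whether the flow may expire.
 */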
5693 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5694                                       u16 flow_id, struct flow_keys *fkeys)
5695 {
5696         struct hclge_vport *vport = hclge_get_vport(handle);
5697         struct hclge_fd_rule_tuples new_tuples;
5698         struct hclge_dev *hdev = vport->back;
5699         struct hclge_fd_rule *rule;
5700         u16 tmp_queue_id;
5701         u16 bit_id;
5702         int ret;
5703
5704         if (!hnae3_dev_fd_supported(hdev))
5705                 return -EOPNOTSUPP;
5706
5707         memset(&new_tuples, 0, sizeof(new_tuples));
5708         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5709
5710         spin_lock_bh(&hdev->fd_rule_lock);
5711
5712         /* when there is already an fd rule added by the user,
5713          * arfs should not work
5714          */
5715         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5716                 spin_unlock_bh(&hdev->fd_rule_lock);
5717
5718                 return -EOPNOTSUPP;
5719         }
5720
5721         /* check whether a flow director filter already exists for this flow:
5722          * if not, create a new filter for it;
5723          * if a filter exists with a different queue id, modify the filter;
5724          * if a filter exists with the same queue id, do nothing
5725          */
5726         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5727         if (!rule) {
5728                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5729                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5730                         spin_unlock_bh(&hdev->fd_rule_lock);
5731
5732                         return -ENOSPC;
5733                 }
5734
5735                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5736                 if (!rule) {
5737                         spin_unlock_bh(&hdev->fd_rule_lock);
5738
5739                         return -ENOMEM;
5740                 }
5741
5742                 set_bit(bit_id, hdev->fd_bmap);
5743                 rule->location = bit_id;
5744                 rule->flow_id = flow_id;
5745                 rule->queue_id = queue_id;
5746                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5747                 ret = hclge_fd_config_rule(hdev, rule);
5748
5749                 spin_unlock_bh(&hdev->fd_rule_lock);
5750
5751                 if (ret)
5752                         return ret;
5753
5754                 return rule->location;
5755         }
5756
5757         spin_unlock_bh(&hdev->fd_rule_lock);
5758
5759         if (rule->queue_id == queue_id)
5760                 return rule->location;
5761
5762         tmp_queue_id = rule->queue_id;
5763         rule->queue_id = queue_id;
5764         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5765         if (ret) {
5766                 rule->queue_id = tmp_queue_id;
5767                 return ret;
5768         }
5769
5770         return rule->location;
5771 }
5772
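/* Age out aRFS rules: under fd_rule_lock, move every rule that the stack
 * reports as expirable onto a temporary list, then delete those rules from
 * the flow director TCAM after the lock has been dropped.
 */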
5773 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5774 {
5775 #ifdef CONFIG_RFS_ACCEL
5776         struct hnae3_handle *handle = &hdev->vport[0].nic;
5777         struct hclge_fd_rule *rule;
5778         struct hlist_node *node;
5779         HLIST_HEAD(del_list);
5780
5781         spin_lock_bh(&hdev->fd_rule_lock);
5782         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5783                 spin_unlock_bh(&hdev->fd_rule_lock);
5784                 return;
5785         }
5786         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5787                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5788                                         rule->flow_id, rule->location)) {
5789                         hlist_del_init(&rule->rule_node);
5790                         hlist_add_head(&rule->rule_node, &del_list);
5791                         hdev->hclge_fd_rule_num--;
5792                         clear_bit(rule->location, hdev->fd_bmap);
5793                 }
5794         }
5795         spin_unlock_bh(&hdev->fd_rule_lock);
5796
5797         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5798                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5799                                      rule->location, NULL, false);
5800                 kfree(rule);
5801         }
5802 #endif
5803 }
5804
5805 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5806 {
5807 #ifdef CONFIG_RFS_ACCEL
5808         struct hclge_vport *vport = hclge_get_vport(handle);
5809         struct hclge_dev *hdev = vport->back;
5810
5811         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5812                 hclge_del_all_fd_entries(handle, true);
5813 #endif
5814 }
5815
5816 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5817 {
5818         struct hclge_vport *vport = hclge_get_vport(handle);
5819         struct hclge_dev *hdev = vport->back;
5820
5821         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5822                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5823 }
5824
5825 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5826 {
5827         struct hclge_vport *vport = hclge_get_vport(handle);
5828         struct hclge_dev *hdev = vport->back;
5829
5830         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5831 }
5832
5833 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5834 {
5835         struct hclge_vport *vport = hclge_get_vport(handle);
5836         struct hclge_dev *hdev = vport->back;
5837
5838         return hdev->rst_stats.hw_reset_done_cnt;
5839 }
5840
5841 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5842 {
5843         struct hclge_vport *vport = hclge_get_vport(handle);
5844         struct hclge_dev *hdev = vport->back;
5845         bool clear;
5846
5847         hdev->fd_en = enable;
5848         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5849         if (!enable)
5850                 hclge_del_all_fd_entries(handle, clear);
5851         else
5852                 hclge_restore_fd_entries(handle);
5853 }
5854
5855 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5856 {
5857         struct hclge_desc desc;
5858         struct hclge_config_mac_mode_cmd *req =
5859                 (struct hclge_config_mac_mode_cmd *)desc.data;
5860         u32 loop_en = 0;
5861         int ret;
5862
5863         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5868         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5869         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5870         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5871         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5872         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5873         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5874         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5875         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5876         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5877         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5878         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5879
5880         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5881         if (ret)
5882                 dev_err(&hdev->pdev->dev,
5883                         "mac enable fail, ret =%d.\n", ret);
5884 }
5885
5886 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5887 {
5888         struct hclge_config_mac_mode_cmd *req;
5889         struct hclge_desc desc;
5890         u32 loop_en;
5891         int ret;
5892
5893         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5894         /* 1 Read out the MAC mode config at first */
5895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5896         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5897         if (ret) {
5898                 dev_err(&hdev->pdev->dev,
5899                         "mac loopback get fail, ret =%d.\n", ret);
5900                 return ret;
5901         }
5902
5903         /* 2 Then setup the loopback flag */
5904         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5905         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5906         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5907         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5908
5909         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5910
5911         /* 3 Config mac work mode with loopback flag
5912          * and its original configure parameters
5913          */
5914         hclge_cmd_reuse_desc(&desc, false);
5915         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5916         if (ret)
5917                 dev_err(&hdev->pdev->dev,
5918                         "mac loopback set fail, ret =%d.\n", ret);
5919         return ret;
5920 }
5921
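/* Enable or disable serdes loopback in three steps: issue the loopback
 * command to the firmware, poll until the firmware reports the command as
 * done and successful, then reconfigure the MAC and poll for the expected
 * link state (up when enabling loopback, down when disabling it).
 */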
5922 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5923                                      enum hnae3_loop loop_mode)
5924 {
5925 #define HCLGE_SERDES_RETRY_MS   10
5926 #define HCLGE_SERDES_RETRY_NUM  100
5927
5928 #define HCLGE_MAC_LINK_STATUS_MS   10
5929 #define HCLGE_MAC_LINK_STATUS_NUM  100
5930 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5931 #define HCLGE_MAC_LINK_STATUS_UP   1
5932
5933         struct hclge_serdes_lb_cmd *req;
5934         struct hclge_desc desc;
5935         int mac_link_ret = 0;
5936         int ret, i = 0;
5937         u8 loop_mode_b;
5938
5939         req = (struct hclge_serdes_lb_cmd *)desc.data;
5940         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5941
5942         switch (loop_mode) {
5943         case HNAE3_LOOP_SERIAL_SERDES:
5944                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5945                 break;
5946         case HNAE3_LOOP_PARALLEL_SERDES:
5947                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5948                 break;
5949         default:
5950                 dev_err(&hdev->pdev->dev,
5951                         "unsupported serdes loopback mode %d\n", loop_mode);
5952                 return -EOPNOTSUPP;
5953         }
5954
5955         if (en) {
5956                 req->enable = loop_mode_b;
5957                 req->mask = loop_mode_b;
5958                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5959         } else {
5960                 req->mask = loop_mode_b;
5961                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5962         }
5963
5964         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5965         if (ret) {
5966                 dev_err(&hdev->pdev->dev,
5967                         "serdes loopback set fail, ret = %d\n", ret);
5968                 return ret;
5969         }
5970
5971         do {
5972                 msleep(HCLGE_SERDES_RETRY_MS);
5973                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5974                                            true);
5975                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5976                 if (ret) {
5977                         dev_err(&hdev->pdev->dev,
5978                                 "serdes loopback get, ret = %d\n", ret);
5979                         return ret;
5980                 }
5981         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5982                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5983
5984         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5985                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5986                 return -EBUSY;
5987         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5988                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5989                 return -EIO;
5990         }
5991
5992         hclge_cfg_mac_mode(hdev, en);
5993
5994         i = 0;
5995         do {
5996                 /* serdes internal loopback, independent of the network cable. */
5997                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5998                 ret = hclge_get_mac_link_status(hdev);
5999                 if (ret == mac_link_ret)
6000                         return 0;
6001         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6002
6003         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6004
6005         return -EBUSY;
6006 }
6007
6008 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6009                             int stream_id, bool enable)
6010 {
6011         struct hclge_desc desc;
6012         struct hclge_cfg_com_tqp_queue_cmd *req =
6013                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6014         int ret;
6015
6016         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6017         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6018         req->stream_id = cpu_to_le16(stream_id);
6019         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6020
6021         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6022         if (ret)
6023                 dev_err(&hdev->pdev->dev,
6024                         "Tqp enable fail, status =%d.\n", ret);
6025         return ret;
6026 }
6027
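/* Configure the requested loopback mode (MAC application loopback or
 * serial/parallel serdes loopback) and then enable or disable every TQP of
 * the vport to match the loopback state.
 */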
6028 static int hclge_set_loopback(struct hnae3_handle *handle,
6029                               enum hnae3_loop loop_mode, bool en)
6030 {
6031         struct hclge_vport *vport = hclge_get_vport(handle);
6032         struct hnae3_knic_private_info *kinfo;
6033         struct hclge_dev *hdev = vport->back;
6034         int i, ret;
6035
6036         switch (loop_mode) {
6037         case HNAE3_LOOP_APP:
6038                 ret = hclge_set_app_loopback(hdev, en);
6039                 break;
6040         case HNAE3_LOOP_SERIAL_SERDES:
6041         case HNAE3_LOOP_PARALLEL_SERDES:
6042                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6043                 break;
6044         default:
6045                 ret = -EOPNOTSUPP;
6046                 dev_err(&hdev->pdev->dev,
6047                         "loop_mode %d is not supported\n", loop_mode);
6048                 break;
6049         }
6050
6051         if (ret)
6052                 return ret;
6053
6054         kinfo = &vport->nic.kinfo;
6055         for (i = 0; i < kinfo->num_tqps; i++) {
6056                 ret = hclge_tqp_enable(hdev, i, 0, en);
6057                 if (ret)
6058                         return ret;
6059         }
6060
6061         return 0;
6062 }
6063
6064 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6065 {
6066         struct hclge_vport *vport = hclge_get_vport(handle);
6067         struct hnae3_knic_private_info *kinfo;
6068         struct hnae3_queue *queue;
6069         struct hclge_tqp *tqp;
6070         int i;
6071
6072         kinfo = &vport->nic.kinfo;
6073         for (i = 0; i < kinfo->num_tqps; i++) {
6074                 queue = handle->kinfo.tqp[i];
6075                 tqp = container_of(queue, struct hclge_tqp, q);
6076                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6077         }
6078 }
6079
6080 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6081 {
6082         struct hclge_vport *vport = hclge_get_vport(handle);
6083         struct hclge_dev *hdev = vport->back;
6084
6085         if (enable) {
6086                 mod_timer(&hdev->service_timer, jiffies + HZ);
6087         } else {
6088                 del_timer_sync(&hdev->service_timer);
6089                 cancel_work_sync(&hdev->service_task);
6090                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6091         }
6092 }
6093
6094 static int hclge_ae_start(struct hnae3_handle *handle)
6095 {
6096         struct hclge_vport *vport = hclge_get_vport(handle);
6097         struct hclge_dev *hdev = vport->back;
6098
6099         /* mac enable */
6100         hclge_cfg_mac_mode(hdev, true);
6101         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6102         hdev->hw.mac.link = 0;
6103
6104         /* reset tqp stats */
6105         hclge_reset_tqp_stats(handle);
6106
6107         hclge_mac_start_phy(hdev);
6108
6109         return 0;
6110 }
6111
6112 static void hclge_ae_stop(struct hnae3_handle *handle)
6113 {
6114         struct hclge_vport *vport = hclge_get_vport(handle);
6115         struct hclge_dev *hdev = vport->back;
6116         int i;
6117
6118         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6119
6120         hclge_clear_arfs_rules(handle);
6121
6122         /* If it is not a PF reset, the firmware will disable the MAC,
6123          * so we only need to stop the phy here.
6124          */
6125         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6126             hdev->reset_type != HNAE3_FUNC_RESET) {
6127                 hclge_mac_stop_phy(hdev);
6128                 return;
6129         }
6130
6131         for (i = 0; i < handle->kinfo.num_tqps; i++)
6132                 hclge_reset_tqp(handle, i);
6133
6134         /* Mac disable */
6135         hclge_cfg_mac_mode(hdev, false);
6136
6137         hclge_mac_stop_phy(hdev);
6138
6139         /* reset tqp stats */
6140         hclge_reset_tqp_stats(handle);
6141         hclge_update_link_status(hdev);
6142 }
6143
6144 int hclge_vport_start(struct hclge_vport *vport)
6145 {
6146         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6147         vport->last_active_jiffies = jiffies;
6148         return 0;
6149 }
6150
6151 void hclge_vport_stop(struct hclge_vport *vport)
6152 {
6153         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6154 }
6155
6156 static int hclge_client_start(struct hnae3_handle *handle)
6157 {
6158         struct hclge_vport *vport = hclge_get_vport(handle);
6159
6160         return hclge_vport_start(vport);
6161 }
6162
6163 static void hclge_client_stop(struct hnae3_handle *handle)
6164 {
6165         struct hclge_vport *vport = hclge_get_vport(handle);
6166
6167         hclge_vport_stop(vport);
6168 }
6169
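/* Translate the result of a MAC_VLAN table command into an errno. A non-zero
 * cmdq_resp means the command itself failed; otherwise resp_code is decoded
 * according to the table operation (add, remove or lookup).
 */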
6170 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6171                                          u16 cmdq_resp, u8  resp_code,
6172                                          enum hclge_mac_vlan_tbl_opcode op)
6173 {
6174         struct hclge_dev *hdev = vport->back;
6175         int return_status = -EIO;
6176
6177         if (cmdq_resp) {
6178                 dev_err(&hdev->pdev->dev,
6179                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6180                         cmdq_resp);
6181                 return -EIO;
6182         }
6183
6184         if (op == HCLGE_MAC_VLAN_ADD) {
6185                 if (!resp_code || resp_code == 1) {
6186                         return_status = 0;
6187                 } else if (resp_code == 2) {
6188                         return_status = -ENOSPC;
6189                         dev_err(&hdev->pdev->dev,
6190                                 "add mac addr failed for uc_overflow.\n");
6191                 } else if (resp_code == 3) {
6192                         return_status = -ENOSPC;
6193                         dev_err(&hdev->pdev->dev,
6194                                 "add mac addr failed for mc_overflow.\n");
6195                 } else {
6196                         dev_err(&hdev->pdev->dev,
6197                                 "add mac addr failed for undefined, code=%d.\n",
6198                                 resp_code);
6199                 }
6200         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6201                 if (!resp_code) {
6202                         return_status = 0;
6203                 } else if (resp_code == 1) {
6204                         return_status = -ENOENT;
6205                         dev_dbg(&hdev->pdev->dev,
6206                                 "remove mac addr failed for miss.\n");
6207                 } else {
6208                         dev_err(&hdev->pdev->dev,
6209                                 "remove mac addr failed for undefined, code=%d.\n",
6210                                 resp_code);
6211                 }
6212         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6213                 if (!resp_code) {
6214                         return_status = 0;
6215                 } else if (resp_code == 1) {
6216                         return_status = -ENOENT;
6217                         dev_dbg(&hdev->pdev->dev,
6218                                 "lookup mac addr failed for miss.\n");
6219                 } else {
6220                         dev_err(&hdev->pdev->dev,
6221                                 "lookup mac addr failed for undefined, code=%d.\n",
6222                                 resp_code);
6223                 }
6224         } else {
6225                 return_status = -EINVAL;
6226                 dev_err(&hdev->pdev->dev,
6227                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6228                         op);
6229         }
6230
6231         return return_status;
6232 }
6233
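/* Set or clear the per-function bit for @vfid in a MAC_VLAN table entry.
 * The function bitmap spans two descriptors: desc[1] holds vfid 0..191 and
 * desc[2] holds vfid 192..255, packed 32 bits per data word.
 */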
6234 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6235 {
6236         int word_num;
6237         int bit_num;
6238
6239         if (vfid > 255 || vfid < 0)
6240                 return -EIO;
6241
6242         if (vfid >= 0 && vfid <= 191) {
6243                 word_num = vfid / 32;
6244                 bit_num  = vfid % 32;
6245                 if (clr)
6246                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6247                 else
6248                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6249         } else {
6250                 word_num = (vfid - 192) / 32;
6251                 bit_num  = vfid % 32;
6252                 if (clr)
6253                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6254                 else
6255                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6256         }
6257
6258         return 0;
6259 }
6260
6261 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6262 {
6263 #define HCLGE_DESC_NUMBER 3
6264 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6265         int i, j;
6266
6267         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6268                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6269                         if (desc[i].data[j])
6270                                 return false;
6271
6272         return true;
6273 }
6274
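/* Pack a 6-byte MAC address into a MAC_VLAN table entry: bytes 0..3 of the
 * address form the 32-bit high word and bytes 4..5 the 16-bit low word, and
 * the multicast entry bits are set when @is_mc is true.
 */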
6275 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6276                                    const u8 *addr, bool is_mc)
6277 {
6278         const unsigned char *mac_addr = addr;
6279         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6280                        (mac_addr[0]) | (mac_addr[1] << 8);
6281         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6282
6283         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6284         if (is_mc) {
6285                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6286                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6287         }
6288
6289         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6290         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6291 }
6292
6293 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6294                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6295 {
6296         struct hclge_dev *hdev = vport->back;
6297         struct hclge_desc desc;
6298         u8 resp_code;
6299         u16 retval;
6300         int ret;
6301
6302         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6303
6304         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6305
6306         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6307         if (ret) {
6308                 dev_err(&hdev->pdev->dev,
6309                         "del mac addr failed for cmd_send, ret =%d.\n",
6310                         ret);
6311                 return ret;
6312         }
6313         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6314         retval = le16_to_cpu(desc.retval);
6315
6316         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6317                                              HCLGE_MAC_VLAN_REMOVE);
6318 }
6319
6320 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6321                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6322                                      struct hclge_desc *desc,
6323                                      bool is_mc)
6324 {
6325         struct hclge_dev *hdev = vport->back;
6326         u8 resp_code;
6327         u16 retval;
6328         int ret;
6329
6330         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6331         if (is_mc) {
6332                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6333                 memcpy(desc[0].data,
6334                        req,
6335                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6336                 hclge_cmd_setup_basic_desc(&desc[1],
6337                                            HCLGE_OPC_MAC_VLAN_ADD,
6338                                            true);
6339                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6340                 hclge_cmd_setup_basic_desc(&desc[2],
6341                                            HCLGE_OPC_MAC_VLAN_ADD,
6342                                            true);
6343                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6344         } else {
6345                 memcpy(desc[0].data,
6346                        req,
6347                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6348                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6349         }
6350         if (ret) {
6351                 dev_err(&hdev->pdev->dev,
6352                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6353                         ret);
6354                 return ret;
6355         }
6356         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6357         retval = le16_to_cpu(desc[0].retval);
6358
6359         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6360                                              HCLGE_MAC_VLAN_LKUP);
6361 }
6362
6363 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6364                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6365                                   struct hclge_desc *mc_desc)
6366 {
6367         struct hclge_dev *hdev = vport->back;
6368         int cfg_status;
6369         u8 resp_code;
6370         u16 retval;
6371         int ret;
6372
6373         if (!mc_desc) {
6374                 struct hclge_desc desc;
6375
6376                 hclge_cmd_setup_basic_desc(&desc,
6377                                            HCLGE_OPC_MAC_VLAN_ADD,
6378                                            false);
6379                 memcpy(desc.data, req,
6380                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6381                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6382                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6383                 retval = le16_to_cpu(desc.retval);
6384
6385                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6386                                                            resp_code,
6387                                                            HCLGE_MAC_VLAN_ADD);
6388         } else {
6389                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6390                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6391                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6392                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6393                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6394                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6395                 memcpy(mc_desc[0].data, req,
6396                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6397                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6398                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6399                 retval = le16_to_cpu(mc_desc[0].retval);
6400
6401                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6402                                                            resp_code,
6403                                                            HCLGE_MAC_VLAN_ADD);
6404         }
6405
6406         if (ret) {
6407                 dev_err(&hdev->pdev->dev,
6408                         "add mac addr failed for cmd_send, ret =%d.\n",
6409                         ret);
6410                 return ret;
6411         }
6412
6413         return cfg_status;
6414 }
6415
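/* Allocate unicast MAC vlan (UMV) table space from the firmware and split it
 * into per-function quotas: each of the (num_req_vfs + 2) functions gets a
 * private quota of max_umv_size / (num_req_vfs + 2) entries, and the shared
 * pool starts at one private quota plus the division remainder. For example,
 * with 256 allocated entries and 6 requested VFs, the private quota is
 * 256 / 8 = 32 entries and the shared pool starts at 32 + 0 = 32 entries.
 */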
6416 static int hclge_init_umv_space(struct hclge_dev *hdev)
6417 {
6418         u16 allocated_size = 0;
6419         int ret;
6420
6421         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6422                                   true);
6423         if (ret)
6424                 return ret;
6425
6426         if (allocated_size < hdev->wanted_umv_size)
6427                 dev_warn(&hdev->pdev->dev,
6428                          "Alloc umv space failed, want %d, get %d\n",
6429                          hdev->wanted_umv_size, allocated_size);
6430
6431         mutex_init(&hdev->umv_mutex);
6432         hdev->max_umv_size = allocated_size;
6433         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6434         hdev->share_umv_size = hdev->priv_umv_size +
6435                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6436
6437         return 0;
6438 }
6439
6440 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6441 {
6442         int ret;
6443
6444         if (hdev->max_umv_size > 0) {
6445                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6446                                           false);
6447                 if (ret)
6448                         return ret;
6449                 hdev->max_umv_size = 0;
6450         }
6451         mutex_destroy(&hdev->umv_mutex);
6452
6453         return 0;
6454 }
6455
6456 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6457                                u16 *allocated_size, bool is_alloc)
6458 {
6459         struct hclge_umv_spc_alc_cmd *req;
6460         struct hclge_desc desc;
6461         int ret;
6462
6463         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6464         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6465         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6466         req->space_size = cpu_to_le32(space_size);
6467
6468         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6469         if (ret) {
6470                 dev_err(&hdev->pdev->dev,
6471                         "%s umv space failed for cmd_send, ret =%d\n",
6472                         is_alloc ? "allocate" : "free", ret);
6473                 return ret;
6474         }
6475
6476         if (is_alloc && allocated_size)
6477                 *allocated_size = le32_to_cpu(desc.data[1]);
6478
6479         return 0;
6480 }
6481
6482 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6483 {
6484         struct hclge_vport *vport;
6485         int i;
6486
6487         for (i = 0; i < hdev->num_alloc_vport; i++) {
6488                 vport = &hdev->vport[i];
6489                 vport->used_umv_num = 0;
6490         }
6491
6492         mutex_lock(&hdev->umv_mutex);
6493         hdev->share_umv_size = hdev->priv_umv_size +
6494                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6495         mutex_unlock(&hdev->umv_mutex);
6496 }
6497
6498 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6499 {
6500         struct hclge_dev *hdev = vport->back;
6501         bool is_full;
6502
6503         mutex_lock(&hdev->umv_mutex);
6504         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6505                    hdev->share_umv_size == 0);
6506         mutex_unlock(&hdev->umv_mutex);
6507
6508         return is_full;
6509 }
6510
6511 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6512 {
6513         struct hclge_dev *hdev = vport->back;
6514
6515         mutex_lock(&hdev->umv_mutex);
6516         if (is_free) {
6517                 if (vport->used_umv_num > hdev->priv_umv_size)
6518                         hdev->share_umv_size++;
6519
6520                 if (vport->used_umv_num > 0)
6521                         vport->used_umv_num--;
6522         } else {
6523                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6524                     hdev->share_umv_size > 0)
6525                         hdev->share_umv_size--;
6526                 vport->used_umv_num++;
6527         }
6528         mutex_unlock(&hdev->umv_mutex);
6529 }
6530
6531 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6532                              const unsigned char *addr)
6533 {
6534         struct hclge_vport *vport = hclge_get_vport(handle);
6535
6536         return hclge_add_uc_addr_common(vport, addr);
6537 }
6538
6539 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6540                              const unsigned char *addr)
6541 {
6542         struct hclge_dev *hdev = vport->back;
6543         struct hclge_mac_vlan_tbl_entry_cmd req;
6544         struct hclge_desc desc;
6545         u16 egress_port = 0;
6546         int ret;
6547
6548         /* mac addr check */
6549         if (is_zero_ether_addr(addr) ||
6550             is_broadcast_ether_addr(addr) ||
6551             is_multicast_ether_addr(addr)) {
6552                 dev_err(&hdev->pdev->dev,
6553                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6554                          addr,
6555                          is_zero_ether_addr(addr),
6556                          is_broadcast_ether_addr(addr),
6557                          is_multicast_ether_addr(addr));
6558                 return -EINVAL;
6559         }
6560
6561         memset(&req, 0, sizeof(req));
6562
6563         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6564                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6565
6566         req.egress_port = cpu_to_le16(egress_port);
6567
6568         hclge_prepare_mac_addr(&req, addr, false);
6569
6570         /* Look up the mac address in the mac_vlan table, and add
6571          * it if the entry does not exist. Duplicate unicast entries
6572          * are not allowed in the mac vlan table.
6573          */
6574         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6575         if (ret == -ENOENT) {
6576                 if (!hclge_is_umv_space_full(vport)) {
6577                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6578                         if (!ret)
6579                                 hclge_update_umv_space(vport, false);
6580                         return ret;
6581                 }
6582
6583                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6584                         hdev->priv_umv_size);
6585
6586                 return -ENOSPC;
6587         }
6588
6589         /* check if we just hit the duplicate */
6590         if (!ret) {
6591                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6592                          vport->vport_id, addr);
6593                 return 0;
6594         }
6595
6596         dev_err(&hdev->pdev->dev,
6597                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6598                 addr);
6599
6600         return ret;
6601 }
6602
6603 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6604                             const unsigned char *addr)
6605 {
6606         struct hclge_vport *vport = hclge_get_vport(handle);
6607
6608         return hclge_rm_uc_addr_common(vport, addr);
6609 }
6610
6611 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6612                             const unsigned char *addr)
6613 {
6614         struct hclge_dev *hdev = vport->back;
6615         struct hclge_mac_vlan_tbl_entry_cmd req;
6616         int ret;
6617
6618         /* mac addr check */
6619         if (is_zero_ether_addr(addr) ||
6620             is_broadcast_ether_addr(addr) ||
6621             is_multicast_ether_addr(addr)) {
6622                 dev_dbg(&hdev->pdev->dev,
6623                         "Remove mac err! invalid mac:%pM.\n",
6624                          addr);
6625                 return -EINVAL;
6626         }
6627
6628         memset(&req, 0, sizeof(req));
6629         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6630         hclge_prepare_mac_addr(&req, addr, false);
6631         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6632         if (!ret)
6633                 hclge_update_umv_space(vport, true);
6634
6635         return ret;
6636 }
6637
6638 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6639                              const unsigned char *addr)
6640 {
6641         struct hclge_vport *vport = hclge_get_vport(handle);
6642
6643         return hclge_add_mc_addr_common(vport, addr);
6644 }
6645
6646 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6647                              const unsigned char *addr)
6648 {
6649         struct hclge_dev *hdev = vport->back;
6650         struct hclge_mac_vlan_tbl_entry_cmd req;
6651         struct hclge_desc desc[3];
6652         int status;
6653
6654         /* mac addr check */
6655         if (!is_multicast_ether_addr(addr)) {
6656                 dev_err(&hdev->pdev->dev,
6657                         "Add mc mac err! invalid mac:%pM.\n",
6658                          addr);
6659                 return -EINVAL;
6660         }
6661         memset(&req, 0, sizeof(req));
6662         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6663         hclge_prepare_mac_addr(&req, addr, true);
6664         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6665         if (!status) {
6666                 /* This mac addr exists, update VFID for it */
6667                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6668                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6669         } else {
6670                 /* This mac addr does not exist, add a new entry for it */
6671                 memset(desc[0].data, 0, sizeof(desc[0].data));
6672                 memset(desc[1].data, 0, sizeof(desc[1].data));
6673                 memset(desc[2].data, 0, sizeof(desc[2].data));
6674                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6675                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6676         }
6677
6678         if (status == -ENOSPC)
6679                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6680
6681         return status;
6682 }
6683
6684 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6685                             const unsigned char *addr)
6686 {
6687         struct hclge_vport *vport = hclge_get_vport(handle);
6688
6689         return hclge_rm_mc_addr_common(vport, addr);
6690 }
6691
6692 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6693                             const unsigned char *addr)
6694 {
6695         struct hclge_dev *hdev = vport->back;
6696         struct hclge_mac_vlan_tbl_entry_cmd req;
6697         enum hclge_cmd_status status;
6698         struct hclge_desc desc[3];
6699
6700         /* mac addr check */
6701         if (!is_multicast_ether_addr(addr)) {
6702                 dev_dbg(&hdev->pdev->dev,
6703                         "Remove mc mac err! invalid mac:%pM.\n",
6704                          addr);
6705                 return -EINVAL;
6706         }
6707
6708         memset(&req, 0, sizeof(req));
6709         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6710         hclge_prepare_mac_addr(&req, addr, true);
6711         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6712         if (!status) {
6713                 /* This mac addr exists, remove this handle's VFID for it */
6714                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6715
6716                 if (hclge_is_all_function_id_zero(desc))
6717                         /* All the vfids are zero, so this entry needs to be deleted */
6718                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6719                 else
6720                         /* Not all the vfids are zero, just update the vfid */
6721                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6722
6723         } else {
6724                 /* Maybe this mac address is in the mta table, but it cannot be
6725                  * deleted here because an mta entry represents an address
6726                  * range rather than a specific address. The delete action for
6727                  * all entries will take effect in update_mta_status called by
6728                  * hns3_nic_set_rx_mode.
6729                  */
6730                 status = 0;
6731         }
6732
6733         return status;
6734 }
6735
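/* The helpers below keep per-vport shadow lists of configured unicast and
 * multicast MAC addresses. hclge_add_vport_mac_table() only records addresses
 * for non-zero vport ids (i.e. VFs); hd_tbl_status records whether an entry is
 * currently programmed into the hardware MAC_VLAN table, so later removal can
 * skip entries that are no longer present in hardware.
 */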
6736 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6737                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6738 {
6739         struct hclge_vport_mac_addr_cfg *mac_cfg;
6740         struct list_head *list;
6741
6742         if (!vport->vport_id)
6743                 return;
6744
6745         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6746         if (!mac_cfg)
6747                 return;
6748
6749         mac_cfg->hd_tbl_status = true;
6750         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6751
6752         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6753                &vport->uc_mac_list : &vport->mc_mac_list;
6754
6755         list_add_tail(&mac_cfg->node, list);
6756 }
6757
6758 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6759                               bool is_write_tbl,
6760                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6761 {
6762         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6763         struct list_head *list;
6764         bool uc_flag, mc_flag;
6765
6766         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6767                &vport->uc_mac_list : &vport->mc_mac_list;
6768
6769         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6770         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6771
6772         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6773                 if (!memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN)) {
6774                         if (uc_flag && mac_cfg->hd_tbl_status)
6775                                 hclge_rm_uc_addr_common(vport, mac_addr);
6776
6777                         if (mc_flag && mac_cfg->hd_tbl_status)
6778                                 hclge_rm_mc_addr_common(vport, mac_addr);
6779
6780                         list_del(&mac_cfg->node);
6781                         kfree(mac_cfg);
6782                         break;
6783                 }
6784         }
6785 }
6786
6787 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6788                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6789 {
6790         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6791         struct list_head *list;
6792
6793         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6794                &vport->uc_mac_list : &vport->mc_mac_list;
6795
6796         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6797                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6798                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6799
6800                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6801                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6802
6803                 mac_cfg->hd_tbl_status = false;
6804                 if (is_del_list) {
6805                         list_del(&mac_cfg->node);
6806                         kfree(mac_cfg);
6807                 }
6808         }
6809 }
6810
6811 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6812 {
6813         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6814         struct hclge_vport *vport;
6815         int i;
6816
6817         mutex_lock(&hdev->vport_cfg_mutex);
6818         for (i = 0; i < hdev->num_alloc_vport; i++) {
6819                 vport = &hdev->vport[i];
6820                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6821                         list_del(&mac->node);
6822                         kfree(mac);
6823                 }
6824
6825                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6826                         list_del(&mac->node);
6827                         kfree(mac);
6828                 }
6829         }
6830         mutex_unlock(&hdev->vport_cfg_mutex);
6831 }
6832
6833 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6834                                               u16 cmdq_resp, u8 resp_code)
6835 {
6836 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6837 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6838 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6839 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6840
6841         int return_status;
6842
6843         if (cmdq_resp) {
6844                 dev_err(&hdev->pdev->dev,
6845                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6846                         cmdq_resp);
6847                 return -EIO;
6848         }
6849
6850         switch (resp_code) {
6851         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6852         case HCLGE_ETHERTYPE_ALREADY_ADD:
6853                 return_status = 0;
6854                 break;
6855         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6856                 dev_err(&hdev->pdev->dev,
6857                         "add mac ethertype failed for manager table overflow.\n");
6858                 return_status = -EIO;
6859                 break;
6860         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6861                 dev_err(&hdev->pdev->dev,
6862                         "add mac ethertype failed for key conflict.\n");
6863                 return_status = -EIO;
6864                 break;
6865         default:
6866                 dev_err(&hdev->pdev->dev,
6867                         "add mac ethertype failed for undefined, code=%d.\n",
6868                         resp_code);
6869                 return_status = -EIO;
6870         }
6871
6872         return return_status;
6873 }
6874
6875 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6876                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6877 {
6878         struct hclge_desc desc;
6879         u8 resp_code;
6880         u16 retval;
6881         int ret;
6882
6883         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6884         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6885
6886         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6887         if (ret) {
6888                 dev_err(&hdev->pdev->dev,
6889                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6890                         ret);
6891                 return ret;
6892         }
6893
6894         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6895         retval = le16_to_cpu(desc.retval);
6896
6897         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6898 }
6899
6900 static int init_mgr_tbl(struct hclge_dev *hdev)
6901 {
6902         int ret;
6903         int i;
6904
6905         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6906                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6907                 if (ret) {
6908                         dev_err(&hdev->pdev->dev,
6909                                 "add mac ethertype failed, ret =%d.\n",
6910                                 ret);
6911                         return ret;
6912                 }
6913         }
6914
6915         return 0;
6916 }
6917
6918 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6919 {
6920         struct hclge_vport *vport = hclge_get_vport(handle);
6921         struct hclge_dev *hdev = vport->back;
6922
6923         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6924 }
6925
6926 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6927                               bool is_first)
6928 {
6929         const unsigned char *new_addr = (const unsigned char *)p;
6930         struct hclge_vport *vport = hclge_get_vport(handle);
6931         struct hclge_dev *hdev = vport->back;
6932         int ret;
6933
6934         /* mac addr check */
6935         if (is_zero_ether_addr(new_addr) ||
6936             is_broadcast_ether_addr(new_addr) ||
6937             is_multicast_ether_addr(new_addr)) {
6938                 dev_err(&hdev->pdev->dev,
6939                         "Change uc mac err! invalid mac:%pM.\n",
6940                          new_addr);
6941                 return -EINVAL;
6942         }
6943
6944         if ((!is_first || is_kdump_kernel()) &&
6945             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6946                 dev_warn(&hdev->pdev->dev,
6947                          "remove old uc mac address fail.\n");
6948
6949         ret = hclge_add_uc_addr(handle, new_addr);
6950         if (ret) {
6951                 dev_err(&hdev->pdev->dev,
6952                         "add uc mac address fail, ret =%d.\n",
6953                         ret);
6954
6955                 if (!is_first &&
6956                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6957                         dev_err(&hdev->pdev->dev,
6958                                 "restore uc mac address fail.\n");
6959
6960                 return -EIO;
6961         }
6962
6963         ret = hclge_pause_addr_cfg(hdev, new_addr);
6964         if (ret) {
6965                 dev_err(&hdev->pdev->dev,
6966                         "configure mac pause address fail, ret =%d.\n",
6967                         ret);
6968                 return -EIO;
6969         }
6970
6971         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6972
6973         return 0;
6974 }
6975
6976 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6977                           int cmd)
6978 {
6979         struct hclge_vport *vport = hclge_get_vport(handle);
6980         struct hclge_dev *hdev = vport->back;
6981
6982         if (!hdev->hw.mac.phydev)
6983                 return -EOPNOTSUPP;
6984
6985         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6986 }
6987
6988 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6989                                       u8 fe_type, bool filter_en, u8 vf_id)
6990 {
6991         struct hclge_vlan_filter_ctrl_cmd *req;
6992         struct hclge_desc desc;
6993         int ret;
6994
6995         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6996
6997         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6998         req->vlan_type = vlan_type;
6999         req->vlan_fe = filter_en ? fe_type : 0;
7000         req->vf_id = vf_id;
7001
7002         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7003         if (ret)
7004                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7005                         ret);
7006
7007         return ret;
7008 }
7009
7010 #define HCLGE_FILTER_TYPE_VF            0
7011 #define HCLGE_FILTER_TYPE_PORT          1
7012 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7013 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7014 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7015 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7016 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7017 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7018                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7019 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7020                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7021
7022 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7023 {
7024         struct hclge_vport *vport = hclge_get_vport(handle);
7025         struct hclge_dev *hdev = vport->back;
7026
7027         if (hdev->pdev->revision >= 0x21) {
7028                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7029                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7030                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7031                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7032         } else {
7033                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7034                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7035                                            0);
7036         }
7037         if (enable)
7038                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7039         else
7040                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7041 }
7042
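     /* hclge_set_vf_vlan_common - add or remove (is_kill) a VLAN entry in
      * the VF VLAN filter of a single function. The VF bitmap spans two
      * command descriptors of HCLGE_MAX_VF_BYTES each and only the bit for
      * vfid is set (e.g. vfid 10 -> byte 1, bit 2). The response code in
      * the first descriptor is then decoded: 0/1 mean success on add,
      * HCLGE_VF_VLAN_NO_ENTRY means the VF VLAN table is full (VF VLAN
      * filtering gets disabled, see the warning below), and
      * HCLGE_VF_VLAN_DEL_NO_FOUND means the entry to delete was not found.
      */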
7043 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7044                                     bool is_kill, u16 vlan, u8 qos,
7045                                     __be16 proto)
7046 {
7047 #define HCLGE_MAX_VF_BYTES  16
7048         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7049         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7050         struct hclge_desc desc[2];
7051         u8 vf_byte_val;
7052         u8 vf_byte_off;
7053         int ret;
7054
7055         hclge_cmd_setup_basic_desc(&desc[0],
7056                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7057         hclge_cmd_setup_basic_desc(&desc[1],
7058                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7059
7060         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7061
7062         vf_byte_off = vfid / 8;
7063         vf_byte_val = 1 << (vfid % 8);
7064
7065         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7066         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7067
7068         req0->vlan_id  = cpu_to_le16(vlan);
7069         req0->vlan_cfg = is_kill;
7070
7071         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7072                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7073         else
7074                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7075
7076         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7077         if (ret) {
7078                 dev_err(&hdev->pdev->dev,
7079                         "Send vf vlan command fail, ret =%d.\n",
7080                         ret);
7081                 return ret;
7082         }
7083
7084         if (!is_kill) {
7085 #define HCLGE_VF_VLAN_NO_ENTRY  2
7086                 if (!req0->resp_code || req0->resp_code == 1)
7087                         return 0;
7088
7089                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7090                         dev_warn(&hdev->pdev->dev,
7091                                  "vf vlan table is full, vf vlan filter is disabled\n");
7092                         return 0;
7093                 }
7094
7095                 dev_err(&hdev->pdev->dev,
7096                         "Add vf vlan filter fail, ret =%d.\n",
7097                         req0->resp_code);
7098         } else {
7099 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7100                 if (!req0->resp_code)
7101                         return 0;
7102
7103                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7104                         dev_warn(&hdev->pdev->dev,
7105                                  "vlan %d filter is not in vf vlan table\n",
7106                                  vlan);
7107                         return 0;
7108                 }
7109
7110                 dev_err(&hdev->pdev->dev,
7111                         "Kill vf vlan filter fail, ret =%d.\n",
7112                         req0->resp_code);
7113         }
7114
7115         return -EIO;
7116 }
7117
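     /* hclge_set_port_vlan_filter - add or remove (is_kill) a VLAN id in the
      * port level VLAN filter table. The VLAN id is encoded as a 160-entry
      * window (vlan_id / 160) plus one bit inside that window's bitmap,
      * e.g. vlan_id 4000 -> window 25, byte 0, bit 0; this presumably
      * matches the layout expected by HCLGE_OPC_VLAN_FILTER_PF_CFG.
      */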
7118 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7119                                       u16 vlan_id, bool is_kill)
7120 {
7121         struct hclge_vlan_filter_pf_cfg_cmd *req;
7122         struct hclge_desc desc;
7123         u8 vlan_offset_byte_val;
7124         u8 vlan_offset_byte;
7125         u8 vlan_offset_160;
7126         int ret;
7127
7128         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7129
7130         vlan_offset_160 = vlan_id / 160;
7131         vlan_offset_byte = (vlan_id % 160) / 8;
7132         vlan_offset_byte_val = 1 << (vlan_id % 8);
7133
7134         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7135         req->vlan_offset = vlan_offset_160;
7136         req->vlan_cfg = is_kill;
7137         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7138
7139         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7140         if (ret)
7141                 dev_err(&hdev->pdev->dev,
7142                         "port vlan command, send fail, ret =%d.\n", ret);
7143         return ret;
7144 }
7145
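     /* hclge_set_vlan_filter_hw - program both the per-vport (VF) and the
      * port level VLAN filters. The hdev->vlan_table[] bitmaps track which
      * vports use each VLAN, so the port filter entry is only written when
      * the first vport joins a VLAN or the last one leaves it. Removing
      * VLAN 0 is a no-op, and adding VLAN 0 twice (which the 8021q module
      * may do) is tolerated.
      */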
7146 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7147                                     u16 vport_id, u16 vlan_id, u8 qos,
7148                                     bool is_kill)
7149 {
7150         u16 vport_idx, vport_num = 0;
7151         int ret;
7152
7153         if (is_kill && !vlan_id)
7154                 return 0;
7155
7156         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7157                                        0, proto);
7158         if (ret) {
7159                 dev_err(&hdev->pdev->dev,
7160                         "Set %d vport vlan filter config fail, ret =%d.\n",
7161                         vport_id, ret);
7162                 return ret;
7163         }
7164
7165         /* vlan 0 may be added twice when 8021q module is enabled */
7166         if (!is_kill && !vlan_id &&
7167             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7168                 return 0;
7169
7170         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7171                 dev_err(&hdev->pdev->dev,
7172                         "Add port vlan failed, vport %d is already in vlan %d\n",
7173                         vport_id, vlan_id);
7174                 return -EINVAL;
7175         }
7176
7177         if (is_kill &&
7178             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7179                 dev_err(&hdev->pdev->dev,
7180                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7181                         vport_id, vlan_id);
7182                 return -EINVAL;
7183         }
7184
7185         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7186                 vport_num++;
7187
7188         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7189                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7190                                                  is_kill);
7191
7192         return ret;
7193 }
7194
7195 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7196 {
7197         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7198         struct hclge_vport_vtag_tx_cfg_cmd *req;
7199         struct hclge_dev *hdev = vport->back;
7200         struct hclge_desc desc;
7201         int status;
7202
7203         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7204
7205         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7206         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7207         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7208         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7209                       vcfg->accept_tag1 ? 1 : 0);
7210         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7211                       vcfg->accept_untag1 ? 1 : 0);
7212         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7213                       vcfg->accept_tag2 ? 1 : 0);
7214         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7215                       vcfg->accept_untag2 ? 1 : 0);
7216         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7217                       vcfg->insert_tag1_en ? 1 : 0);
7218         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7219                       vcfg->insert_tag2_en ? 1 : 0);
7220         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7221
7222         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7223         req->vf_bitmap[req->vf_offset] =
7224                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7225
7226         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7227         if (status)
7228                 dev_err(&hdev->pdev->dev,
7229                         "Send port txvlan cfg command fail, ret =%d\n",
7230                         status);
7231
7232         return status;
7233 }
7234
7235 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7236 {
7237         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7238         struct hclge_vport_vtag_rx_cfg_cmd *req;
7239         struct hclge_dev *hdev = vport->back;
7240         struct hclge_desc desc;
7241         int status;
7242
7243         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7244
7245         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7246         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7247                       vcfg->strip_tag1_en ? 1 : 0);
7248         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7249                       vcfg->strip_tag2_en ? 1 : 0);
7250         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7251                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7252         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7253                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7254
7255         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7256         req->vf_bitmap[req->vf_offset] =
7257                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7258
7259         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7260         if (status)
7261                 dev_err(&hdev->pdev->dev,
7262                         "Send port rxvlan cfg command fail, ret =%d\n",
7263                         status);
7264
7265         return status;
7266 }
7267
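     /* hclge_vlan_offload_cfg - set up TX VLAN tag insertion and RX VLAN tag
      * stripping for a vport based on its port based VLAN state. With port
      * based VLAN disabled, tag1 from the stack is accepted, nothing is
      * inserted and tag2 stripping follows rx_vlan_offload_en; with it
      * enabled, hardware inserts vlan_tag as tag1 on transmit, tag2 is
      * always stripped and tag1 stripping follows rx_vlan_offload_en.
      */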
7268 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7269                                   u16 port_base_vlan_state,
7270                                   u16 vlan_tag)
7271 {
7272         int ret;
7273
7274         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7275                 vport->txvlan_cfg.accept_tag1 = true;
7276                 vport->txvlan_cfg.insert_tag1_en = false;
7277                 vport->txvlan_cfg.default_tag1 = 0;
7278         } else {
7279                 vport->txvlan_cfg.accept_tag1 = false;
7280                 vport->txvlan_cfg.insert_tag1_en = true;
7281                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7282         }
7283
7284         vport->txvlan_cfg.accept_untag1 = true;
7285
7286         /* accept_tag2 and accept_untag2 are not supported on
7287          * pdev revision(0x20); newer revisions support them, but
7288          * these two fields cannot be configured by the user.
7289          */
7290         vport->txvlan_cfg.accept_tag2 = true;
7291         vport->txvlan_cfg.accept_untag2 = true;
7292         vport->txvlan_cfg.insert_tag2_en = false;
7293         vport->txvlan_cfg.default_tag2 = 0;
7294
7295         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7296                 vport->rxvlan_cfg.strip_tag1_en = false;
7297                 vport->rxvlan_cfg.strip_tag2_en =
7298                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7299         } else {
7300                 vport->rxvlan_cfg.strip_tag1_en =
7301                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7302                 vport->rxvlan_cfg.strip_tag2_en = true;
7303         }
7304         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7305         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7306
7307         ret = hclge_set_vlan_tx_offload_cfg(vport);
7308         if (ret)
7309                 return ret;
7310
7311         return hclge_set_vlan_rx_offload_cfg(vport);
7312 }
7313
7314 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7315 {
7316         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7317         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7318         struct hclge_desc desc;
7319         int status;
7320
7321         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7322         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7323         rx_req->ot_fst_vlan_type =
7324                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7325         rx_req->ot_sec_vlan_type =
7326                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7327         rx_req->in_fst_vlan_type =
7328                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7329         rx_req->in_sec_vlan_type =
7330                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7331
7332         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7333         if (status) {
7334                 dev_err(&hdev->pdev->dev,
7335                         "Send rxvlan protocol type command fail, ret =%d\n",
7336                         status);
7337                 return status;
7338         }
7339
7340         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7341
7342         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7343         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7344         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7345
7346         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7347         if (status)
7348                 dev_err(&hdev->pdev->dev,
7349                         "Send txvlan protocol type command fail, ret =%d\n",
7350                         status);
7351
7352         return status;
7353 }
7354
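     /* hclge_init_vlan_config - initial VLAN setup for the PF: enable the
      * VLAN filters (per-function VF egress filters plus a port ingress
      * filter on revision 0x21, a single V1 egress filter otherwise), set
      * the default 0x8100 VLAN protocol types, apply each vport's VLAN
      * offload configuration and finally add VLAN 0 to the filter.
      */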
7355 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7356 {
7357 #define HCLGE_DEF_VLAN_TYPE             0x8100
7358
7359         struct hnae3_handle *handle = &hdev->vport[0].nic;
7360         struct hclge_vport *vport;
7361         int ret;
7362         int i;
7363
7364         if (hdev->pdev->revision >= 0x21) {
7365                 /* for revision 0x21, vf vlan filter is per function */
7366                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7367                         vport = &hdev->vport[i];
7368                         ret = hclge_set_vlan_filter_ctrl(hdev,
7369                                                          HCLGE_FILTER_TYPE_VF,
7370                                                          HCLGE_FILTER_FE_EGRESS,
7371                                                          true,
7372                                                          vport->vport_id);
7373                         if (ret)
7374                                 return ret;
7375                 }
7376
7377                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7378                                                  HCLGE_FILTER_FE_INGRESS, true,
7379                                                  0);
7380                 if (ret)
7381                         return ret;
7382         } else {
7383                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7384                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7385                                                  true, 0);
7386                 if (ret)
7387                         return ret;
7388         }
7389
7390         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7391
7392         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7393         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7394         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7395         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7396         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7397         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398
7399         ret = hclge_set_vlan_protocol_type(hdev);
7400         if (ret)
7401                 return ret;
7402
7403         for (i = 0; i < hdev->num_alloc_vport; i++) {
7404                 u16 vlan_tag;
7405
7406                 vport = &hdev->vport[i];
7407                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7408
7409                 ret = hclge_vlan_offload_cfg(vport,
7410                                              vport->port_base_vlan_cfg.state,
7411                                              vlan_tag);
7412                 if (ret)
7413                         return ret;
7414         }
7415
7416         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7417 }
7418
7419 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7420                                        bool writen_to_tbl)
7421 {
7422         struct hclge_vport_vlan_cfg *vlan;
7423
7424         /* vlan 0 is reserved */
7425         if (!vlan_id)
7426                 return;
7427
7428         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7429         if (!vlan)
7430                 return;
7431
7432         vlan->hd_tbl_status = writen_to_tbl;
7433         vlan->vlan_id = vlan_id;
7434
7435         list_add_tail(&vlan->node, &vport->vlan_list);
7436 }
7437
7438 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7439 {
7440         struct hclge_vport_vlan_cfg *vlan, *tmp;
7441         struct hclge_dev *hdev = vport->back;
7442         int ret;
7443
7444         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7445                 if (!vlan->hd_tbl_status) {
7446                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7447                                                        vport->vport_id,
7448                                                        vlan->vlan_id, 0, false);
7449                         if (ret) {
7450                                 dev_err(&hdev->pdev->dev,
7451                                         "restore vport vlan list failed, ret=%d\n",
7452                                         ret);
7453                                 return ret;
7454                         }
7455                 }
7456                 vlan->hd_tbl_status = true;
7457         }
7458
7459         return 0;
7460 }
7461
7462 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7463                                       bool is_write_tbl)
7464 {
7465         struct hclge_vport_vlan_cfg *vlan, *tmp;
7466         struct hclge_dev *hdev = vport->back;
7467
7468         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7469                 if (vlan->vlan_id == vlan_id) {
7470                         if (is_write_tbl && vlan->hd_tbl_status)
7471                                 hclge_set_vlan_filter_hw(hdev,
7472                                                          htons(ETH_P_8021Q),
7473                                                          vport->vport_id,
7474                                                          vlan_id, 0,
7475                                                          true);
7476
7477                         list_del(&vlan->node);
7478                         kfree(vlan);
7479                         break;
7480                 }
7481         }
7482 }
7483
7484 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7485 {
7486         struct hclge_vport_vlan_cfg *vlan, *tmp;
7487         struct hclge_dev *hdev = vport->back;
7488
7489         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7490                 if (vlan->hd_tbl_status)
7491                         hclge_set_vlan_filter_hw(hdev,
7492                                                  htons(ETH_P_8021Q),
7493                                                  vport->vport_id,
7494                                                  vlan->vlan_id, 0,
7495                                                  true);
7496
7497                 vlan->hd_tbl_status = false;
7498                 if (is_del_list) {
7499                         list_del(&vlan->node);
7500                         kfree(vlan);
7501                 }
7502         }
7503 }
7504
7505 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7506 {
7507         struct hclge_vport_vlan_cfg *vlan, *tmp;
7508         struct hclge_vport *vport;
7509         int i;
7510
7511         mutex_lock(&hdev->vport_cfg_mutex);
7512         for (i = 0; i < hdev->num_alloc_vport; i++) {
7513                 vport = &hdev->vport[i];
7514                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7515                         list_del(&vlan->node);
7516                         kfree(vlan);
7517                 }
7518         }
7519         mutex_unlock(&hdev->vport_cfg_mutex);
7520 }
7521
7522 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7523 {
7524         struct hclge_vport *vport = hclge_get_vport(handle);
7525
7526         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7527                 vport->rxvlan_cfg.strip_tag1_en = false;
7528                 vport->rxvlan_cfg.strip_tag2_en = enable;
7529         } else {
7530                 vport->rxvlan_cfg.strip_tag1_en = enable;
7531                 vport->rxvlan_cfg.strip_tag2_en = true;
7532         }
7533         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7534         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7535         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7536
7537         return hclge_set_vlan_rx_offload_cfg(vport);
7538 }
7539
7540 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7541                                             u16 port_base_vlan_state,
7542                                             struct hclge_vlan_info *new_info,
7543                                             struct hclge_vlan_info *old_info)
7544 {
7545         struct hclge_dev *hdev = vport->back;
7546         int ret;
7547
7548         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7549                 hclge_rm_vport_all_vlan_table(vport, false);
7550                 return hclge_set_vlan_filter_hw(hdev,
7551                                                  htons(new_info->vlan_proto),
7552                                                  vport->vport_id,
7553                                                  new_info->vlan_tag,
7554                                                  new_info->qos, false);
7555         }
7556
7557         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7558                                        vport->vport_id, old_info->vlan_tag,
7559                                        old_info->qos, true);
7560         if (ret)
7561                 return ret;
7562
7563         return hclge_add_vport_all_vlan_table(vport);
7564 }
7565
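     /* hclge_update_port_base_vlan_cfg - move a vport to a new port based
      * VLAN state. A MODIFY only swaps the hardware filter entry for the
      * new tag; ENABLE/DISABLE additionally migrate entries between the
      * port based filter and the vport's software VLAN list through
      * hclge_update_vlan_filter_entries() and update the cached state.
      */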
7566 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7567                                     struct hclge_vlan_info *vlan_info)
7568 {
7569         struct hnae3_handle *nic = &vport->nic;
7570         struct hclge_vlan_info *old_vlan_info;
7571         struct hclge_dev *hdev = vport->back;
7572         int ret;
7573
7574         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7575
7576         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7577         if (ret)
7578                 return ret;
7579
7580         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7581                 /* add new VLAN tag */
7582                 ret = hclge_set_vlan_filter_hw(hdev,
7583                                                htons(vlan_info->vlan_proto),
7584                                                vport->vport_id,
7585                                                vlan_info->vlan_tag,
7586                                                vlan_info->qos, false);
7587                 if (ret)
7588                         return ret;
7589
7590                 /* remove old VLAN tag */
7591                 ret = hclge_set_vlan_filter_hw(hdev,
7592                                                htons(old_vlan_info->vlan_proto),
7593                                                vport->vport_id,
7594                                                old_vlan_info->vlan_tag,
7595                                                old_vlan_info->qos, true);
7596                 if (ret)
7597                         return ret;
7598
7599                 goto update;
7600         }
7601
7602         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7603                                                old_vlan_info);
7604         if (ret)
7605                 return ret;
7606
7607         /* update state only when disabling/enabling port based VLAN */
7608         vport->port_base_vlan_cfg.state = state;
7609         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7610                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7611         else
7612                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7613
7614 update:
7615         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7616         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7617         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7618
7619         return 0;
7620 }
7621
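     /* hclge_get_port_base_vlan_state - translate the current port based
      * VLAN state and the requested VLAN id into the action to take:
      * NOCHANGE, ENABLE (first non-zero tag), DISABLE (tag cleared) or
      * MODIFY (tag changed while already enabled).
      */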
7622 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7623                                           enum hnae3_port_base_vlan_state state,
7624                                           u16 vlan)
7625 {
7626         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7627                 if (!vlan)
7628                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7629                 else
7630                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7631         } else {
7632                 if (!vlan)
7633                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7634                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7635                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7636                 else
7637                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7638         }
7639 }
7640
7641 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7642                                     u16 vlan, u8 qos, __be16 proto)
7643 {
7644         struct hclge_vport *vport = hclge_get_vport(handle);
7645         struct hclge_dev *hdev = vport->back;
7646         struct hclge_vlan_info vlan_info;
7647         u16 state;
7648         int ret;
7649
7650         if (hdev->pdev->revision == 0x20)
7651                 return -EOPNOTSUPP;
7652
7653         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7654         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7655                 return -EINVAL;
7656         if (proto != htons(ETH_P_8021Q))
7657                 return -EPROTONOSUPPORT;
7658
7659         vport = &hdev->vport[vfid];
7660         state = hclge_get_port_base_vlan_state(vport,
7661                                                vport->port_base_vlan_cfg.state,
7662                                                vlan);
7663         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7664                 return 0;
7665
7666         vlan_info.vlan_tag = vlan;
7667         vlan_info.qos = qos;
7668         vlan_info.vlan_proto = ntohs(proto);
7669
7670         /* update port based VLAN for PF */
7671         if (!vfid) {
7672                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7673                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7674                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7675
7676                 return ret;
7677         }
7678
7679         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7680                 return hclge_update_port_base_vlan_cfg(vport, state,
7681                                                        &vlan_info);
7682         } else {
7683                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7684                                                         (u8)vfid, state,
7685                                                         vlan, qos,
7686                                                         ntohs(proto));
7687                 return ret;
7688         }
7689 }
7690
7691 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7692                           u16 vlan_id, bool is_kill)
7693 {
7694         struct hclge_vport *vport = hclge_get_vport(handle);
7695         struct hclge_dev *hdev = vport->back;
7696         bool writen_to_tbl = false;
7697         int ret = 0;
7698
7699         /* When port based VLAN is enabled, we use it as the VLAN filter
7700          * entry. In this case, we don't update the VLAN filter table when
7701          * the user adds or removes a VLAN; we only update the vport VLAN
7702          * list. The VLAN ids in that list won't be written to the VLAN
7703          * filter table until port based VLAN is disabled.
7704          */
7705         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7706                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7707                                                vlan_id, 0, is_kill);
7708                 writen_to_tbl = true;
7709         }
7710
7711         if (ret)
7712                 return ret;
7713
7714         if (is_kill)
7715                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7716         else
7717                 hclge_add_vport_vlan_table(vport, vlan_id,
7718                                            writen_to_tbl);
7719
7720         return 0;
7721 }
7722
7723 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7724 {
7725         struct hclge_config_max_frm_size_cmd *req;
7726         struct hclge_desc desc;
7727
7728         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7729
7730         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7731         req->max_frm_size = cpu_to_le16(new_mps);
7732         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7733
7734         return hclge_cmd_send(&hdev->hw, &desc, 1);
7735 }
7736
7737 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7738 {
7739         struct hclge_vport *vport = hclge_get_vport(handle);
7740
7741         return hclge_set_vport_mtu(vport, new_mtu);
7742 }
7743
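     /* hclge_set_vport_mtu - change a vport's max frame size (MPS). The
      * frame size is the MTU plus Ethernet header, FCS and two VLAN tags
      * (e.g. a 1500 byte MTU needs a 1526 byte frame) and is bounded by
      * HCLGE_MAC_MIN_FRAME/HCLGE_MAC_MAX_FRAME. A VF's MPS may not exceed
      * the PF's, and the PF's may not drop below any VF's. Changing the PF
      * MPS takes the client down, reprograms the MAC frame size and
      * re-runs buffer allocation before bringing the client back up.
      */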
7744 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7745 {
7746         struct hclge_dev *hdev = vport->back;
7747         int i, max_frm_size, ret = 0;
7748
7749         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7750         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7751             max_frm_size > HCLGE_MAC_MAX_FRAME)
7752                 return -EINVAL;
7753
7754         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7755         mutex_lock(&hdev->vport_lock);
7756         /* VF's mps must fit within hdev->mps */
7757         if (vport->vport_id && max_frm_size > hdev->mps) {
7758                 mutex_unlock(&hdev->vport_lock);
7759                 return -EINVAL;
7760         } else if (vport->vport_id) {
7761                 vport->mps = max_frm_size;
7762                 mutex_unlock(&hdev->vport_lock);
7763                 return 0;
7764         }
7765
7766         /* PF's mps must be greater than VF's mps */
7767         for (i = 1; i < hdev->num_alloc_vport; i++)
7768                 if (max_frm_size < hdev->vport[i].mps) {
7769                         mutex_unlock(&hdev->vport_lock);
7770                         return -EINVAL;
7771                 }
7772
7773         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7774
7775         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7776         if (ret) {
7777                 dev_err(&hdev->pdev->dev,
7778                         "Change mtu fail, ret =%d\n", ret);
7779                 goto out;
7780         }
7781
7782         hdev->mps = max_frm_size;
7783         vport->mps = max_frm_size;
7784
7785         ret = hclge_buffer_alloc(hdev);
7786         if (ret)
7787                 dev_err(&hdev->pdev->dev,
7788                         "Allocate buffer fail, ret =%d\n", ret);
7789
7790 out:
7791         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7792         mutex_unlock(&hdev->vport_lock);
7793         return ret;
7794 }
7795
7796 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7797                                     bool enable)
7798 {
7799         struct hclge_reset_tqp_queue_cmd *req;
7800         struct hclge_desc desc;
7801         int ret;
7802
7803         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7804
7805         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7806         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7807         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7808
7809         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7810         if (ret) {
7811                 dev_err(&hdev->pdev->dev,
7812                         "Send tqp reset cmd error, status =%d\n", ret);
7813                 return ret;
7814         }
7815
7816         return 0;
7817 }
7818
7819 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7820 {
7821         struct hclge_reset_tqp_queue_cmd *req;
7822         struct hclge_desc desc;
7823         int ret;
7824
7825         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7826
7827         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7828         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7829
7830         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7831         if (ret) {
7832                 dev_err(&hdev->pdev->dev,
7833                         "Get reset status error, status =%d\n", ret);
7834                 return ret;
7835         }
7836
7837         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7838 }
7839
7840 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7841 {
7842         struct hnae3_queue *queue;
7843         struct hclge_tqp *tqp;
7844
7845         queue = handle->kinfo.tqp[queue_id];
7846         tqp = container_of(queue, struct hclge_tqp, q);
7847
7848         return tqp->index;
7849 }
7850
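     /* hclge_reset_tqp - reset a single queue pair: disable the TQP, assert
      * its reset through HCLGE_OPC_RESET_TQP_QUEUE, poll the ready-to-reset
      * status up to HCLGE_TQP_RESET_TRY_TIMES times with 20 ms sleeps in
      * between, then deassert the reset again.
      */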
7851 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7852 {
7853         struct hclge_vport *vport = hclge_get_vport(handle);
7854         struct hclge_dev *hdev = vport->back;
7855         int reset_try_times = 0;
7856         int reset_status;
7857         u16 queue_gid;
7858         int ret = 0;
7859
7860         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7861
7862         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7863         if (ret) {
7864                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7865                 return ret;
7866         }
7867
7868         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7869         if (ret) {
7870                 dev_err(&hdev->pdev->dev,
7871                         "Send reset tqp cmd fail, ret = %d\n", ret);
7872                 return ret;
7873         }
7874
7875         reset_try_times = 0;
7876         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7877                 /* Wait for tqp hw reset */
7878                 msleep(20);
7879                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7880                 if (reset_status)
7881                         break;
7882         }
7883
7884         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7885                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7886                 return ret;
7887         }
7888
7889         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7890         if (ret)
7891                 dev_err(&hdev->pdev->dev,
7892                         "Deassert the soft reset fail, ret = %d\n", ret);
7893
7894         return ret;
7895 }
7896
7897 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7898 {
7899         struct hclge_dev *hdev = vport->back;
7900         int reset_try_times = 0;
7901         int reset_status;
7902         u16 queue_gid;
7903         int ret;
7904
7905         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7906
7907         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7908         if (ret) {
7909                 dev_warn(&hdev->pdev->dev,
7910                          "Send reset tqp cmd fail, ret = %d\n", ret);
7911                 return;
7912         }
7913
7914         reset_try_times = 0;
7915         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7916                 /* Wait for tqp hw reset */
7917                 msleep(20);
7918                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7919                 if (reset_status)
7920                         break;
7921         }
7922
7923         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7924                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7925                 return;
7926         }
7927
7928         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7929         if (ret)
7930                 dev_warn(&hdev->pdev->dev,
7931                          "Deassert the soft reset fail, ret = %d\n", ret);
7932 }
7933
7934 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7935 {
7936         struct hclge_vport *vport = hclge_get_vport(handle);
7937         struct hclge_dev *hdev = vport->back;
7938
7939         return hdev->fw_version;
7940 }
7941
7942 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7943 {
7944         struct phy_device *phydev = hdev->hw.mac.phydev;
7945
7946         if (!phydev)
7947                 return;
7948
7949         phy_set_asym_pause(phydev, rx_en, tx_en);
7950 }
7951
7952 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7953 {
7954         int ret;
7955
7956         if (rx_en && tx_en)
7957                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7958         else if (rx_en && !tx_en)
7959                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7960         else if (!rx_en && tx_en)
7961                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7962         else
7963                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7964
7965         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7966                 return 0;
7967
7968         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7969         if (ret) {
7970                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7971                         ret);
7972                 return ret;
7973         }
7974
7975         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7976
7977         return 0;
7978 }
7979
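     /* hclge_cfg_flowctrl - re-resolve flow control after PHY autoneg:
      * combine the local advertisement with the link partner's pause bits
      * via mii_resolve_flowctrl_fdx(), force pause off on half duplex and
      * apply the result through hclge_cfg_pauseparam().
      */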
7980 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7981 {
7982         struct phy_device *phydev = hdev->hw.mac.phydev;
7983         u16 remote_advertising = 0;
7984         u16 local_advertising = 0;
7985         u32 rx_pause, tx_pause;
7986         u8 flowctl;
7987
7988         if (!phydev->link || !phydev->autoneg)
7989                 return 0;
7990
7991         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7992
7993         if (phydev->pause)
7994                 remote_advertising = LPA_PAUSE_CAP;
7995
7996         if (phydev->asym_pause)
7997                 remote_advertising |= LPA_PAUSE_ASYM;
7998
7999         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8000                                            remote_advertising);
8001         tx_pause = flowctl & FLOW_CTRL_TX;
8002         rx_pause = flowctl & FLOW_CTRL_RX;
8003
8004         if (phydev->duplex == HCLGE_MAC_HALF) {
8005                 tx_pause = 0;
8006                 rx_pause = 0;
8007         }
8008
8009         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8010 }
8011
8012 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8013                                  u32 *rx_en, u32 *tx_en)
8014 {
8015         struct hclge_vport *vport = hclge_get_vport(handle);
8016         struct hclge_dev *hdev = vport->back;
8017
8018         *auto_neg = hclge_get_autoneg(handle);
8019
8020         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8021                 *rx_en = 0;
8022                 *tx_en = 0;
8023                 return;
8024         }
8025
8026         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8027                 *rx_en = 1;
8028                 *tx_en = 0;
8029         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8030                 *tx_en = 1;
8031                 *rx_en = 0;
8032         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8033                 *rx_en = 1;
8034                 *tx_en = 1;
8035         } else {
8036                 *rx_en = 0;
8037                 *tx_en = 0;
8038         }
8039 }
8040
8041 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8042                                 u32 rx_en, u32 tx_en)
8043 {
8044         struct hclge_vport *vport = hclge_get_vport(handle);
8045         struct hclge_dev *hdev = vport->back;
8046         struct phy_device *phydev = hdev->hw.mac.phydev;
8047         u32 fc_autoneg;
8048
8049         fc_autoneg = hclge_get_autoneg(handle);
8050         if (auto_neg != fc_autoneg) {
8051                 dev_info(&hdev->pdev->dev,
8052                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8053                 return -EOPNOTSUPP;
8054         }
8055
8056         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8057                 dev_info(&hdev->pdev->dev,
8058                          "Priority flow control enabled. Cannot set link flow control.\n");
8059                 return -EOPNOTSUPP;
8060         }
8061
8062         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8063
8064         if (!fc_autoneg)
8065                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8066
8067         if (phydev)
8068                 return phy_start_aneg(phydev);
8069
8070         if (hdev->pdev->revision == 0x20)
8071                 return -EOPNOTSUPP;
8072
8073         return hclge_restart_autoneg(handle);
8074 }
8075
8076 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8077                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8078 {
8079         struct hclge_vport *vport = hclge_get_vport(handle);
8080         struct hclge_dev *hdev = vport->back;
8081
8082         if (speed)
8083                 *speed = hdev->hw.mac.speed;
8084         if (duplex)
8085                 *duplex = hdev->hw.mac.duplex;
8086         if (auto_neg)
8087                 *auto_neg = hdev->hw.mac.autoneg;
8088 }
8089
8090 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8091                                  u8 *module_type)
8092 {
8093         struct hclge_vport *vport = hclge_get_vport(handle);
8094         struct hclge_dev *hdev = vport->back;
8095
8096         if (media_type)
8097                 *media_type = hdev->hw.mac.media_type;
8098
8099         if (module_type)
8100                 *module_type = hdev->hw.mac.module_type;
8101 }
8102
8103 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8104                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8105 {
8106         struct hclge_vport *vport = hclge_get_vport(handle);
8107         struct hclge_dev *hdev = vport->back;
8108         struct phy_device *phydev = hdev->hw.mac.phydev;
8109         int mdix_ctrl, mdix, retval, is_resolved;
8110
8111         if (!phydev) {
8112                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8113                 *tp_mdix = ETH_TP_MDI_INVALID;
8114                 return;
8115         }
8116
8117         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8118
8119         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8120         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8121                                     HCLGE_PHY_MDIX_CTRL_S);
8122
8123         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8124         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8125         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8126
8127         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8128
8129         switch (mdix_ctrl) {
8130         case 0x0:
8131                 *tp_mdix_ctrl = ETH_TP_MDI;
8132                 break;
8133         case 0x1:
8134                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8135                 break;
8136         case 0x3:
8137                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8138                 break;
8139         default:
8140                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8141                 break;
8142         }
8143
8144         if (!is_resolved)
8145                 *tp_mdix = ETH_TP_MDI_INVALID;
8146         else if (mdix)
8147                 *tp_mdix = ETH_TP_MDI_X;
8148         else
8149                 *tp_mdix = ETH_TP_MDI;
8150 }
8151
8152 static void hclge_info_show(struct hclge_dev *hdev)
8153 {
8154         struct device *dev = &hdev->pdev->dev;
8155
8156         dev_info(dev, "PF info begin:\n");
8157
8158         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8159         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8160         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8161         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8162         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8163         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8164         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8165         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8166         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8167         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8168         dev_info(dev, "This is %s PF\n",
8169                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8170         dev_info(dev, "DCB %s\n",
8171                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8172         dev_info(dev, "MQPRIO %s\n",
8173                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8174
8175         dev_info(dev, "PF info end.\n");
8176 }
8177
8178 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8179                                           struct hclge_vport *vport)
8180 {
8181         struct hnae3_client *client = vport->nic.client;
8182         struct hclge_dev *hdev = ae_dev->priv;
8183         int ret;
8184
8185         ret = client->ops->init_instance(&vport->nic);
8186         if (ret)
8187                 return ret;
8188
8189         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8190         hnae3_set_client_init_flag(client, ae_dev, 1);
8191
8192         if (netif_msg_drv(&hdev->vport->nic))
8193                 hclge_info_show(hdev);
8194
8195         return 0;
8196 }
8197
8198 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8199                                            struct hclge_vport *vport)
8200 {
8201         struct hnae3_client *client = vport->roce.client;
8202         struct hclge_dev *hdev = ae_dev->priv;
8203         int ret;
8204
8205         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8206             !hdev->nic_client)
8207                 return 0;
8208
8209         client = hdev->roce_client;
8210         ret = hclge_init_roce_base_info(vport);
8211         if (ret)
8212                 return ret;
8213
8214         ret = client->ops->init_instance(&vport->roce);
8215         if (ret)
8216                 return ret;
8217
8218         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8219         hnae3_set_client_init_flag(client, ae_dev, 1);
8220
8221         return 0;
8222 }
8223
8224 static int hclge_init_client_instance(struct hnae3_client *client,
8225                                       struct hnae3_ae_dev *ae_dev)
8226 {
8227         struct hclge_dev *hdev = ae_dev->priv;
8228         struct hclge_vport *vport;
8229         int i, ret;
8230
8231         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8232                 vport = &hdev->vport[i];
8233
8234                 switch (client->type) {
8235                 case HNAE3_CLIENT_KNIC:
8236
8237                         hdev->nic_client = client;
8238                         vport->nic.client = client;
8239                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8240                         if (ret)
8241                                 goto clear_nic;
8242
8243                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8244                         if (ret)
8245                                 goto clear_roce;
8246
8247                         break;
8248                 case HNAE3_CLIENT_UNIC:
8249                         hdev->nic_client = client;
8250                         vport->nic.client = client;
8251
8252                         ret = client->ops->init_instance(&vport->nic);
8253                         if (ret)
8254                                 goto clear_nic;
8255
8256                         hnae3_set_client_init_flag(client, ae_dev, 1);
8257
8258                         break;
8259                 case HNAE3_CLIENT_ROCE:
8260                         if (hnae3_dev_roce_supported(hdev)) {
8261                                 hdev->roce_client = client;
8262                                 vport->roce.client = client;
8263                         }
8264
8265                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8266                         if (ret)
8267                                 goto clear_roce;
8268
8269                         break;
8270                 default:
8271                         return -EINVAL;
8272                 }
8273         }
8274
8275         return 0;
8276
8277 clear_nic:
8278         hdev->nic_client = NULL;
8279         vport->nic.client = NULL;
8280         return ret;
8281 clear_roce:
8282         hdev->roce_client = NULL;
8283         vport->roce.client = NULL;
8284         return ret;
8285 }
8286
8287 static void hclge_uninit_client_instance(struct hnae3_client *client,
8288                                          struct hnae3_ae_dev *ae_dev)
8289 {
8290         struct hclge_dev *hdev = ae_dev->priv;
8291         struct hclge_vport *vport;
8292         int i;
8293
8294         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8295                 vport = &hdev->vport[i];
8296                 if (hdev->roce_client) {
8297                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8298                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8299                                                                 0);
8300                         hdev->roce_client = NULL;
8301                         vport->roce.client = NULL;
8302                 }
8303                 if (client->type == HNAE3_CLIENT_ROCE)
8304                         return;
8305                 if (hdev->nic_client && client->ops->uninit_instance) {
8306                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8307                         client->ops->uninit_instance(&vport->nic, 0);
8308                         hdev->nic_client = NULL;
8309                         vport->nic.client = NULL;
8310                 }
8311         }
8312 }
8313
8314 static int hclge_pci_init(struct hclge_dev *hdev)
8315 {
8316         struct pci_dev *pdev = hdev->pdev;
8317         struct hclge_hw *hw;
8318         int ret;
8319
8320         ret = pci_enable_device(pdev);
8321         if (ret) {
8322                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8323                 return ret;
8324         }
8325
8326         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8327         if (ret) {
8328                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8329                 if (ret) {
8330                         dev_err(&pdev->dev,
8331                                 "can't set consistent PCI DMA\n");
8332                         goto err_disable_device;
8333                 }
8334                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8335         }
8336
8337         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8338         if (ret) {
8339                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8340                 goto err_disable_device;
8341         }
8342
8343         pci_set_master(pdev);
8344         hw = &hdev->hw;
8345         hw->io_base = pcim_iomap(pdev, 2, 0);
8346         if (!hw->io_base) {
8347                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8348                 ret = -ENOMEM;
8349                 goto err_clr_master;
8350         }
8351
8352         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8353
8354         return 0;
8355 err_clr_master:
8356         pci_clear_master(pdev);
8357         pci_release_regions(pdev);
8358 err_disable_device:
8359         pci_disable_device(pdev);
8360
8361         return ret;
8362 }
8363
8364 static void hclge_pci_uninit(struct hclge_dev *hdev)
8365 {
8366         struct pci_dev *pdev = hdev->pdev;
8367
8368         pcim_iounmap(pdev, hdev->hw.io_base);
8369         pci_free_irq_vectors(pdev);
8370         pci_clear_master(pdev);
8371         pci_release_mem_regions(pdev);
8372         pci_disable_device(pdev);
8373 }
8374
8375 static void hclge_state_init(struct hclge_dev *hdev)
8376 {
8377         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8378         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8379         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8380         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8381         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8382         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8383 }
8384
8385 static void hclge_state_uninit(struct hclge_dev *hdev)
8386 {
8387         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8388
8389         if (hdev->service_timer.function)
8390                 del_timer_sync(&hdev->service_timer);
8391         if (hdev->reset_timer.function)
8392                 del_timer_sync(&hdev->reset_timer);
8393         if (hdev->service_task.func)
8394                 cancel_work_sync(&hdev->service_task);
8395         if (hdev->rst_service_task.func)
8396                 cancel_work_sync(&hdev->rst_service_task);
8397         if (hdev->mbx_service_task.func)
8398                 cancel_work_sync(&hdev->mbx_service_task);
8399 }
8400
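/* Called before an FLR is issued: request a function level reset through
 * the reset task, then poll HNAE3_FLR_DOWN for up to 5 seconds
 * (HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS) so the function is quiesced
 * before the FLR actually hits the hardware.
 */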
8401 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8402 {
8403 #define HCLGE_FLR_WAIT_MS       100
8404 #define HCLGE_FLR_WAIT_CNT      50
8405         struct hclge_dev *hdev = ae_dev->priv;
8406         int cnt = 0;
8407
8408         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8409         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8410         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8411         hclge_reset_event(hdev->pdev, NULL);
8412
8413         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8414                cnt++ < HCLGE_FLR_WAIT_CNT)
8415                 msleep(HCLGE_FLR_WAIT_MS);
8416
8417         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8418                 dev_err(&hdev->pdev->dev,
8419                         "flr wait down timeout: %d\n", cnt);
8420 }
8421
8422 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8423 {
8424         struct hclge_dev *hdev = ae_dev->priv;
8425
8426         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8427 }
8428
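/* Top-level PF initialization: allocate the hclge_dev, set up PCI and DMA
 * resources, bring up the firmware command queue, query the hardware
 * capabilities, then configure MSI/MSI-X, TQPs, vports, MAC, VLAN, TM,
 * RSS, the flow director and the service/reset/mailbox tasks.
 */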
8429 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8430 {
8431         struct pci_dev *pdev = ae_dev->pdev;
8432         struct hclge_dev *hdev;
8433         int ret;
8434
8435         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8436         if (!hdev) {
8437                 ret = -ENOMEM;
8438                 goto out;
8439         }
8440
8441         hdev->pdev = pdev;
8442         hdev->ae_dev = ae_dev;
8443         hdev->reset_type = HNAE3_NONE_RESET;
8444         hdev->reset_level = HNAE3_FUNC_RESET;
8445         ae_dev->priv = hdev;
8446         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8447
8448         mutex_init(&hdev->vport_lock);
8449         mutex_init(&hdev->vport_cfg_mutex);
8450         spin_lock_init(&hdev->fd_rule_lock);
8451
8452         ret = hclge_pci_init(hdev);
8453         if (ret) {
8454                 dev_err(&pdev->dev, "PCI init failed\n");
8455                 goto out;
8456         }
8457
8458         /* Initialize the firmware command queue */
8459         ret = hclge_cmd_queue_init(hdev);
8460         if (ret) {
8461                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8462                 goto err_pci_uninit;
8463         }
8464
8465         /* Initialize the firmware command interface */
8466         ret = hclge_cmd_init(hdev);
8467         if (ret)
8468                 goto err_cmd_uninit;
8469
8470         ret = hclge_get_cap(hdev);
8471         if (ret) {
8472                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8473                         ret);
8474                 goto err_cmd_uninit;
8475         }
8476
8477         ret = hclge_configure(hdev);
8478         if (ret) {
8479                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8480                 goto err_cmd_uninit;
8481         }
8482
8483         ret = hclge_init_msi(hdev);
8484         if (ret) {
8485                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8486                 goto err_cmd_uninit;
8487         }
8488
8489         ret = hclge_misc_irq_init(hdev);
8490         if (ret) {
8491                 dev_err(&pdev->dev,
8492                         "Misc IRQ(vector0) init error, ret = %d.\n",
8493                         ret);
8494                 goto err_msi_uninit;
8495         }
8496
8497         ret = hclge_alloc_tqps(hdev);
8498         if (ret) {
8499                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8500                 goto err_msi_irq_uninit;
8501         }
8502
8503         ret = hclge_alloc_vport(hdev);
8504         if (ret) {
8505                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8506                 goto err_msi_irq_uninit;
8507         }
8508
8509         ret = hclge_map_tqp(hdev);
8510         if (ret) {
8511                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8512                 goto err_msi_irq_uninit;
8513         }
8514
8515         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8516                 ret = hclge_mac_mdio_config(hdev);
8517                 if (ret) {
8518                         dev_err(&hdev->pdev->dev,
8519                                 "mdio config fail ret=%d\n", ret);
8520                         goto err_msi_irq_uninit;
8521                 }
8522         }
8523
8524         ret = hclge_init_umv_space(hdev);
8525         if (ret) {
8526                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8527                 goto err_mdiobus_unreg;
8528         }
8529
8530         ret = hclge_mac_init(hdev);
8531         if (ret) {
8532                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8533                 goto err_mdiobus_unreg;
8534         }
8535
8536         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8537         if (ret) {
8538                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8539                 goto err_mdiobus_unreg;
8540         }
8541
8542         ret = hclge_config_gro(hdev, true);
8543         if (ret)
8544                 goto err_mdiobus_unreg;
8545
8546         ret = hclge_init_vlan_config(hdev);
8547         if (ret) {
8548                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8549                 goto err_mdiobus_unreg;
8550         }
8551
8552         ret = hclge_tm_schd_init(hdev);
8553         if (ret) {
8554                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8555                 goto err_mdiobus_unreg;
8556         }
8557
8558         hclge_rss_init_cfg(hdev);
8559         ret = hclge_rss_init_hw(hdev);
8560         if (ret) {
8561                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8562                 goto err_mdiobus_unreg;
8563         }
8564
8565         ret = init_mgr_tbl(hdev);
8566         if (ret) {
8567                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8568                 goto err_mdiobus_unreg;
8569         }
8570
8571         ret = hclge_init_fd_config(hdev);
8572         if (ret) {
8573                 dev_err(&pdev->dev,
8574                         "fd table init fail, ret=%d\n", ret);
8575                 goto err_mdiobus_unreg;
8576         }
8577
8578         ret = hclge_hw_error_set_state(hdev, true);
8579         if (ret) {
8580                 dev_err(&pdev->dev,
8581                         "fail(%d) to enable hw error interrupts\n", ret);
8582                 goto err_mdiobus_unreg;
8583         }
8584
8585         INIT_KFIFO(hdev->mac_tnl_log);
8586
8587         hclge_dcb_ops_set(hdev);
8588
8589         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8590         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8591         INIT_WORK(&hdev->service_task, hclge_service_task);
8592         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8593         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8594
8595         hclge_clear_all_event_cause(hdev);
8596
8597         /* Enable MISC vector(vector0) */
8598         hclge_enable_vector(&hdev->misc_vector, true);
8599
8600         hclge_state_init(hdev);
8601         hdev->last_reset_time = jiffies;
8602
8603         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8604         return 0;
8605
8606 err_mdiobus_unreg:
8607         if (hdev->hw.mac.phydev)
8608                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8609 err_msi_irq_uninit:
8610         hclge_misc_irq_uninit(hdev);
8611 err_msi_uninit:
8612         pci_free_irq_vectors(pdev);
8613 err_cmd_uninit:
8614         hclge_cmd_uninit(hdev);
8615 err_pci_uninit:
8616         pcim_iounmap(pdev, hdev->hw.io_base);
8617         pci_clear_master(pdev);
8618         pci_release_regions(pdev);
8619         pci_disable_device(pdev);
8620 out:
8621         return ret;
8622 }
8623
8624 static void hclge_stats_clear(struct hclge_dev *hdev)
8625 {
8626         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8627 }
8628
8629 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8630 {
8631         struct hclge_vport *vport = hdev->vport;
8632         int i;
8633
8634         for (i = 0; i < hdev->num_alloc_vport; i++) {
8635                 hclge_vport_stop(vport);
8636                 vport++;
8637         }
8638 }
8639
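/* Re-initialize the hardware after a reset. The software configuration is
 * kept; only the hardware state (command queue, TQP mapping, MAC, TSO/GRO,
 * VLAN, scheduler, RSS, flow director and error interrupts) is rebuilt.
 */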
8640 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8641 {
8642         struct hclge_dev *hdev = ae_dev->priv;
8643         struct pci_dev *pdev = ae_dev->pdev;
8644         int ret;
8645
8646         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8647
8648         hclge_stats_clear(hdev);
8649         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8650
8651         ret = hclge_cmd_init(hdev);
8652         if (ret) {
8653                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8654                 return ret;
8655         }
8656
8657         ret = hclge_map_tqp(hdev);
8658         if (ret) {
8659                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8660                 return ret;
8661         }
8662
8663         hclge_reset_umv_space(hdev);
8664
8665         ret = hclge_mac_init(hdev);
8666         if (ret) {
8667                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8668                 return ret;
8669         }
8670
8671         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8672         if (ret) {
8673                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8674                 return ret;
8675         }
8676
8677         ret = hclge_config_gro(hdev, true);
8678         if (ret)
8679                 return ret;
8680
8681         ret = hclge_init_vlan_config(hdev);
8682         if (ret) {
8683                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8684                 return ret;
8685         }
8686
8687         ret = hclge_tm_init_hw(hdev, true);
8688         if (ret) {
8689                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8690                 return ret;
8691         }
8692
8693         ret = hclge_rss_init_hw(hdev);
8694         if (ret) {
8695                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8696                 return ret;
8697         }
8698
8699         ret = hclge_init_fd_config(hdev);
8700         if (ret) {
8701                 dev_err(&pdev->dev,
8702                         "fd table init fail, ret=%d\n", ret);
8703                 return ret;
8704         }
8705
8706         /* Re-enable the hw error interrupts because
8707          * the interrupts get disabled on core/global reset.
8708          */
8709         ret = hclge_hw_error_set_state(hdev, true);
8710         if (ret) {
8711                 dev_err(&pdev->dev,
8712                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8713                 return ret;
8714         }
8715
8716         hclge_reset_vport_state(hdev);
8717
8718         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8719                  HCLGE_DRIVER_NAME);
8720
8721         return 0;
8722 }
8723
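/* Tear down the PF in roughly the reverse order of initialization: stop the
 * timers and service tasks, unregister the MDIO bus, release the UMV space,
 * mask the misc vector, disable error reporting and free the command queue,
 * IRQ and PCI resources.
 */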
8724 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8725 {
8726         struct hclge_dev *hdev = ae_dev->priv;
8727         struct hclge_mac *mac = &hdev->hw.mac;
8728
8729         hclge_state_uninit(hdev);
8730
8731         if (mac->phydev)
8732                 mdiobus_unregister(mac->mdio_bus);
8733
8734         hclge_uninit_umv_space(hdev);
8735
8736         /* Disable MISC vector(vector0) */
8737         hclge_enable_vector(&hdev->misc_vector, false);
8738         synchronize_irq(hdev->misc_vector.vector_irq);
8739
8740         hclge_config_mac_tnl_int(hdev, false);
8741         hclge_hw_error_set_state(hdev, false);
8742         hclge_cmd_uninit(hdev);
8743         hclge_misc_irq_uninit(hdev);
8744         hclge_pci_uninit(hdev);
8745         mutex_destroy(&hdev->vport_lock);
8746         hclge_uninit_vport_mac_table(hdev);
8747         hclge_uninit_vport_vlan_table(hdev);
8748         mutex_destroy(&hdev->vport_cfg_mutex);
8749         ae_dev->priv = NULL;
8750 }
8751
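/* Maximum number of combined channels the user may request: bounded by the
 * RSS table size and by the TQPs this vport can spread across each TC.
 */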
8752 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8753 {
8754         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8755         struct hclge_vport *vport = hclge_get_vport(handle);
8756         struct hclge_dev *hdev = vport->back;
8757
8758         return min_t(u32, hdev->rss_size_max,
8759                      vport->alloc_tqps / kinfo->num_tc);
8760 }
8761
8762 static void hclge_get_channels(struct hnae3_handle *handle,
8763                                struct ethtool_channels *ch)
8764 {
8765         ch->max_combined = hclge_get_max_channels(handle);
8766         ch->other_count = 1;
8767         ch->max_other = 1;
8768         ch->combined_count = handle->kinfo.rss_size;
8769 }
8770
8771 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8772                                         u16 *alloc_tqps, u16 *max_rss_size)
8773 {
8774         struct hclge_vport *vport = hclge_get_vport(handle);
8775         struct hclge_dev *hdev = vport->back;
8776
8777         *alloc_tqps = vport->alloc_tqps;
8778         *max_rss_size = hdev->rss_size_max;
8779 }
8780
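/* Change the number of combined channels: record the requested RSS size,
 * let the TM code remap the vport's TQPs, reprogram the RSS TC mode and,
 * unless the user has already configured one, rebuild the RSS indirection
 * table to cover the new queue range.
 */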
8781 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8782                               bool rxfh_configured)
8783 {
8784         struct hclge_vport *vport = hclge_get_vport(handle);
8785         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8786         struct hclge_dev *hdev = vport->back;
8787         int cur_rss_size = kinfo->rss_size;
8788         int cur_tqps = kinfo->num_tqps;
8789         u16 tc_offset[HCLGE_MAX_TC_NUM];
8790         u16 tc_valid[HCLGE_MAX_TC_NUM];
8791         u16 tc_size[HCLGE_MAX_TC_NUM];
8792         u16 roundup_size;
8793         u32 *rss_indir;
8794         int ret, i;
8795
8796         kinfo->req_rss_size = new_tqps_num;
8797
8798         ret = hclge_tm_vport_map_update(hdev);
8799         if (ret) {
8800                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8801                 return ret;
8802         }
8803
8804         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8805         roundup_size = ilog2(roundup_size);
8806         /* Set the RSS TC mode according to the new RSS size */
8807         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8808                 tc_valid[i] = 0;
8809
8810                 if (!(hdev->hw_tc_map & BIT(i)))
8811                         continue;
8812
8813                 tc_valid[i] = 1;
8814                 tc_size[i] = roundup_size;
8815                 tc_offset[i] = kinfo->rss_size * i;
8816         }
8817         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8818         if (ret)
8819                 return ret;
8820
8821         /* RSS indirection table has been configured by the user */
8822         if (rxfh_configured)
8823                 goto out;
8824
8825         /* Reinitialize the RSS indirection table according to the new RSS size */
8826         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8827         if (!rss_indir)
8828                 return -ENOMEM;
8829
8830         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8831                 rss_indir[i] = i % kinfo->rss_size;
8832
8833         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8834         if (ret)
8835                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8836                         ret);
8837
8838         kfree(rss_indir);
8839
8840 out:
8841         if (!ret)
8842                 dev_info(&hdev->pdev->dev,
8843                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8844                          cur_rss_size, kinfo->rss_size,
8845                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8846
8847         return ret;
8848 }
8849
8850 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8851                               u32 *regs_num_64_bit)
8852 {
8853         struct hclge_desc desc;
8854         u32 total_num;
8855         int ret;
8856
8857         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8858         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8859         if (ret) {
8860                 dev_err(&hdev->pdev->dev,
8861                         "Query register number cmd failed, ret = %d.\n", ret);
8862                 return ret;
8863         }
8864
8865         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8866         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8867
8868         total_num = *regs_num_32_bit + *regs_num_64_bit;
8869         if (!total_num)
8870                 return -EINVAL;
8871
8872         return 0;
8873 }
8874
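/* Read the firmware-held 32-bit registers. Each descriptor carries
 * HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words, but the first descriptor
 * loses two words to the command header, hence the "+ 2" when computing
 * how many descriptors are needed.
 */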
8875 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8876                                  void *data)
8877 {
8878 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8879
8880         struct hclge_desc *desc;
8881         u32 *reg_val = data;
8882         __le32 *desc_data;
8883         int cmd_num;
8884         int i, k, n;
8885         int ret;
8886
8887         if (regs_num == 0)
8888                 return 0;
8889
8890         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8891         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8892         if (!desc)
8893                 return -ENOMEM;
8894
8895         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8896         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8897         if (ret) {
8898                 dev_err(&hdev->pdev->dev,
8899                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8900                 kfree(desc);
8901                 return ret;
8902         }
8903
8904         for (i = 0; i < cmd_num; i++) {
8905                 if (i == 0) {
8906                         desc_data = (__le32 *)(&desc[i].data[0]);
8907                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8908                 } else {
8909                         desc_data = (__le32 *)(&desc[i]);
8910                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8911                 }
8912                 for (k = 0; k < n; k++) {
8913                         *reg_val++ = le32_to_cpu(*desc_data++);
8914
8915                         regs_num--;
8916                         if (!regs_num)
8917                                 break;
8918                 }
8919         }
8920
8921         kfree(desc);
8922         return 0;
8923 }
8924
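/* Same scheme for the 64-bit registers: HCLGE_64_BIT_REG_RTN_DATANUM
 * 64-bit words per descriptor, with the first descriptor giving up one
 * word to the command header (hence the "+ 1").
 */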
8925 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8926                                  void *data)
8927 {
8928 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8929
8930         struct hclge_desc *desc;
8931         u64 *reg_val = data;
8932         __le64 *desc_data;
8933         int cmd_num;
8934         int i, k, n;
8935         int ret;
8936
8937         if (regs_num == 0)
8938                 return 0;
8939
8940         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8941         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8942         if (!desc)
8943                 return -ENOMEM;
8944
8945         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8946         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8947         if (ret) {
8948                 dev_err(&hdev->pdev->dev,
8949                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8950                 kfree(desc);
8951                 return ret;
8952         }
8953
8954         for (i = 0; i < cmd_num; i++) {
8955                 if (i == 0) {
8956                         desc_data = (__le64 *)(&desc[i].data[0]);
8957                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8958                 } else {
8959                         desc_data = (__le64 *)(&desc[i]);
8960                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8961                 }
8962                 for (k = 0; k < n; k++) {
8963                         *reg_val++ = le64_to_cpu(*desc_data++);
8964
8965                         regs_num--;
8966                         if (!regs_num)
8967                                 break;
8968                 }
8969         }
8970
8971         kfree(desc);
8972         return 0;
8973 }
8974
8975 #define MAX_SEPARATE_NUM        4
8976 #define SEPARATOR_VALUE         0xFFFFFFFF
8977 #define REG_NUM_PER_LINE        4
8978 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8979
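/* Length of the register dump: directly read registers (command queue and
 * common registers once per PF, ring registers per TQP, TQP interrupt
 * registers per used MSI-X vector), each group padded to whole
 * REG_LEN_PER_LINE lines, plus the firmware-queried 32-bit and 64-bit
 * register sets.
 */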
8980 static int hclge_get_regs_len(struct hnae3_handle *handle)
8981 {
8982         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8983         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8984         struct hclge_vport *vport = hclge_get_vport(handle);
8985         struct hclge_dev *hdev = vport->back;
8986         u32 regs_num_32_bit, regs_num_64_bit;
8987         int ret;
8988
8989         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8990         if (ret) {
8991                 dev_err(&hdev->pdev->dev,
8992                         "Get register number failed, ret = %d.\n", ret);
8993                 return -EOPNOTSUPP;
8994         }
8995
8996         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8997         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8998         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8999         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9000
9001         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9002                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9003                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9004 }
9005
9006 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9007                            void *data)
9008 {
9009         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9010         struct hclge_vport *vport = hclge_get_vport(handle);
9011         struct hclge_dev *hdev = vport->back;
9012         u32 regs_num_32_bit, regs_num_64_bit;
9013         int i, j, reg_um, separator_num;
9014         u32 *reg = data;
9015         int ret;
9016
9017         *version = hdev->fw_version;
9018
9019         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9020         if (ret) {
9021                 dev_err(&hdev->pdev->dev,
9022                         "Get register number failed, ret = %d.\n", ret);
9023                 return;
9024         }
9025
9026         /* fetch per-PF register values from the PF PCIe register space */
9027         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9028         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9029         for (i = 0; i < reg_um; i++)
9030                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9031         for (i = 0; i < separator_num; i++)
9032                 *reg++ = SEPARATOR_VALUE;
9033
9034         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9035         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9036         for (i = 0; i < reg_um; i++)
9037                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9038         for (i = 0; i < separator_num; i++)
9039                 *reg++ = SEPARATOR_VALUE;
9040
9041         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9042         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9043         for (j = 0; j < kinfo->num_tqps; j++) {
9044                 for (i = 0; i < reg_um; i++)
9045                         *reg++ = hclge_read_dev(&hdev->hw,
9046                                                 ring_reg_addr_list[i] +
9047                                                 0x200 * j);
9048                 for (i = 0; i < separator_num; i++)
9049                         *reg++ = SEPARATOR_VALUE;
9050         }
9051
9052         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9053         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9054         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9055                 for (i = 0; i < reg_um; i++)
9056                         *reg++ = hclge_read_dev(&hdev->hw,
9057                                                 tqp_intr_reg_addr_list[i] +
9058                                                 4 * j);
9059                 for (i = 0; i < separator_num; i++)
9060                         *reg++ = SEPARATOR_VALUE;
9061         }
9062
9063         /* fetch PF common register values from firmware */
9064         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9065         if (ret) {
9066                 dev_err(&hdev->pdev->dev,
9067                         "Get 32 bit register failed, ret = %d.\n", ret);
9068                 return;
9069         }
9070
9071         reg += regs_num_32_bit;
9072         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9073         if (ret)
9074                 dev_err(&hdev->pdev->dev,
9075                         "Get 64 bit register failed, ret = %d.\n", ret);
9076 }
9077
9078 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9079 {
9080         struct hclge_set_led_state_cmd *req;
9081         struct hclge_desc desc;
9082         int ret;
9083
9084         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9085
9086         req = (struct hclge_set_led_state_cmd *)desc.data;
9087         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9088                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9089
9090         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9091         if (ret)
9092                 dev_err(&hdev->pdev->dev,
9093                         "Send set led state cmd error, ret =%d\n", ret);
9094
9095         return ret;
9096 }
9097
9098 enum hclge_led_status {
9099         HCLGE_LED_OFF,
9100         HCLGE_LED_ON,
9101         HCLGE_LED_NO_CHANGE = 0xFF,
9102 };
9103
9104 static int hclge_set_led_id(struct hnae3_handle *handle,
9105                             enum ethtool_phys_id_state status)
9106 {
9107         struct hclge_vport *vport = hclge_get_vport(handle);
9108         struct hclge_dev *hdev = vport->back;
9109
9110         switch (status) {
9111         case ETHTOOL_ID_ACTIVE:
9112                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9113         case ETHTOOL_ID_INACTIVE:
9114                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9115         default:
9116                 return -EINVAL;
9117         }
9118 }
9119
9120 static void hclge_get_link_mode(struct hnae3_handle *handle,
9121                                 unsigned long *supported,
9122                                 unsigned long *advertising)
9123 {
9124         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9125         struct hclge_vport *vport = hclge_get_vport(handle);
9126         struct hclge_dev *hdev = vport->back;
9127         unsigned int idx = 0;
9128
9129         for (; idx < size; idx++) {
9130                 supported[idx] = hdev->hw.mac.supported[idx];
9131                 advertising[idx] = hdev->hw.mac.advertising[idx];
9132         }
9133 }
9134
9135 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9136 {
9137         struct hclge_vport *vport = hclge_get_vport(handle);
9138         struct hclge_dev *hdev = vport->back;
9139
9140         return hclge_config_gro(hdev, enable);
9141 }
9142
9143 static const struct hnae3_ae_ops hclge_ops = {
9144         .init_ae_dev = hclge_init_ae_dev,
9145         .uninit_ae_dev = hclge_uninit_ae_dev,
9146         .flr_prepare = hclge_flr_prepare,
9147         .flr_done = hclge_flr_done,
9148         .init_client_instance = hclge_init_client_instance,
9149         .uninit_client_instance = hclge_uninit_client_instance,
9150         .map_ring_to_vector = hclge_map_ring_to_vector,
9151         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9152         .get_vector = hclge_get_vector,
9153         .put_vector = hclge_put_vector,
9154         .set_promisc_mode = hclge_set_promisc_mode,
9155         .set_loopback = hclge_set_loopback,
9156         .start = hclge_ae_start,
9157         .stop = hclge_ae_stop,
9158         .client_start = hclge_client_start,
9159         .client_stop = hclge_client_stop,
9160         .get_status = hclge_get_status,
9161         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9162         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9163         .get_media_type = hclge_get_media_type,
9164         .check_port_speed = hclge_check_port_speed,
9165         .get_fec = hclge_get_fec,
9166         .set_fec = hclge_set_fec,
9167         .get_rss_key_size = hclge_get_rss_key_size,
9168         .get_rss_indir_size = hclge_get_rss_indir_size,
9169         .get_rss = hclge_get_rss,
9170         .set_rss = hclge_set_rss,
9171         .set_rss_tuple = hclge_set_rss_tuple,
9172         .get_rss_tuple = hclge_get_rss_tuple,
9173         .get_tc_size = hclge_get_tc_size,
9174         .get_mac_addr = hclge_get_mac_addr,
9175         .set_mac_addr = hclge_set_mac_addr,
9176         .do_ioctl = hclge_do_ioctl,
9177         .add_uc_addr = hclge_add_uc_addr,
9178         .rm_uc_addr = hclge_rm_uc_addr,
9179         .add_mc_addr = hclge_add_mc_addr,
9180         .rm_mc_addr = hclge_rm_mc_addr,
9181         .set_autoneg = hclge_set_autoneg,
9182         .get_autoneg = hclge_get_autoneg,
9183         .restart_autoneg = hclge_restart_autoneg,
9184         .get_pauseparam = hclge_get_pauseparam,
9185         .set_pauseparam = hclge_set_pauseparam,
9186         .set_mtu = hclge_set_mtu,
9187         .reset_queue = hclge_reset_tqp,
9188         .get_stats = hclge_get_stats,
9189         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9190         .update_stats = hclge_update_stats,
9191         .get_strings = hclge_get_strings,
9192         .get_sset_count = hclge_get_sset_count,
9193         .get_fw_version = hclge_get_fw_version,
9194         .get_mdix_mode = hclge_get_mdix_mode,
9195         .enable_vlan_filter = hclge_enable_vlan_filter,
9196         .set_vlan_filter = hclge_set_vlan_filter,
9197         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9198         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9199         .reset_event = hclge_reset_event,
9200         .set_default_reset_request = hclge_set_def_reset_request,
9201         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9202         .set_channels = hclge_set_channels,
9203         .get_channels = hclge_get_channels,
9204         .get_regs_len = hclge_get_regs_len,
9205         .get_regs = hclge_get_regs,
9206         .set_led_id = hclge_set_led_id,
9207         .get_link_mode = hclge_get_link_mode,
9208         .add_fd_entry = hclge_add_fd_entry,
9209         .del_fd_entry = hclge_del_fd_entry,
9210         .del_all_fd_entries = hclge_del_all_fd_entries,
9211         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9212         .get_fd_rule_info = hclge_get_fd_rule_info,
9213         .get_fd_all_rules = hclge_get_all_rules,
9214         .restore_fd_rules = hclge_restore_fd_entries,
9215         .enable_fd = hclge_enable_fd,
9216         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9217         .dbg_run_cmd = hclge_dbg_run_cmd,
9218         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9219         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9220         .ae_dev_resetting = hclge_ae_dev_resetting,
9221         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9222         .set_gro_en = hclge_gro_en,
9223         .get_global_queue_id = hclge_covert_handle_qid_global,
9224         .set_timer_task = hclge_set_timer_task,
9225         .mac_connect_phy = hclge_mac_connect_phy,
9226         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9227 };
9228
9229 static struct hnae3_ae_algo ae_algo = {
9230         .ops = &hclge_ops,
9231         .pdev_id_table = ae_algo_pci_tbl,
9232 };
9233
9234 static int hclge_init(void)
9235 {
9236         pr_info("%s is initializing\n", HCLGE_NAME);
9237
9238         hnae3_register_ae_algo(&ae_algo);
9239
9240         return 0;
9241 }
9242
9243 static void hclge_exit(void)
9244 {
9245         hnae3_unregister_ae_algo(&ae_algo);
9246 }
9247 module_init(hclge_init);
9248 module_exit(hclge_exit);
9249
9250 MODULE_LICENSE("GPL");
9251 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9252 MODULE_DESCRIPTION("HCLGE Driver");
9253 MODULE_VERSION(HCLGE_MOD_VERSION);