Merge branch 'net-hns3-add-aRFS-feature-and-fix-FEC-bugs-for-HNS3-driver'
[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
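/* Fallback statistics read: fetch the MAC counters with a fixed number of
 * descriptors (HCLGE_MAC_CMD_NUM) and accumulate every 64-bit counter into
 * hdev->hw_stats.mac_stats.
 */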
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0032, only the first desc has the head */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
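/* Newer firmware reports how many descriptors a full MAC statistics read
 * needs (see hclge_mac_query_reg_num()); allocate that many, issue the
 * HCLGE_OPC_STATS_MAC_ALL query and accumulate the returned counters.
 */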
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0034, only the first desc has the head */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
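        /* The first descriptor returns 3 statistics registers and each
         * following descriptor returns 4, so round the remainder up.
         */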
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
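/* Query how many MAC statistics registers the firmware exposes and pick the
 * matching read method; fall back to the fixed-size read when the query
 * opcode is not supported (-EOPNOTSUPP).
 */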
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
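/* Issue one RX status query and one TX status query per TQP and add the
 * returned packet counts to the per-queue software statistics.
 */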
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
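        /* each TQP contributes one TX and one RX statistics counter */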
505         return kinfo->num_tqps * (2);
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
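/* Copy each counter described in strs[] out of the common statistics block
 * using its byte offset; returns the position just past the copied data.
 */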
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only GE mode supports it
624          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
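/* Read the PF resources assigned by firmware: TQP count, packet/TX/DV buffer
 * sizes and the MSI-X vector layout (NIC vectors and optional RoCE vectors).
 */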
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and Roce vectors,
803                  * NIC vectors are queued before Roce vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
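/* Translate the speed code reported by firmware into the driver's
 * HCLGE_MAC_SPEED_* value.
 */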
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
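/* The hclge_convert_setting_{sr,lr,cr,kr} helpers below translate the
 * firmware speed-ability bitmap into the matching ethtool link modes for
 * SR, LR, CR and KR media respectively.
 */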
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
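/* Rebuild the advertised FEC modes for the current speed: BaseR for 10G/40G,
 * RS for 25G/50G/100G (25G/50G also keep BaseR in their FEC ability), and
 * nothing for other speeds.
 */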
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to supporting all speeds for a GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }

1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
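        /* param[2] carries the low 32 bits of the MAC address and param[3]
         * the high bits; merge them into a single value.
         */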
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled out
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Len is expressed in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimal queue pairs equal to the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "invalid TC num %d, set to 1\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Currently non-contiguous tc is not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
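/* Program the minimum and maximum TSO MSS that the hardware accepts. */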
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
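/* hclge_alloc_tqps: allocate one hclge_tqp per hardware queue pair and
 * initialize its register base, buffer size and descriptor numbers.
 */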
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
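/* hclge_map_tqps_to_func: bind physical queue @tqp_pid to function @func_id
 * as its virtual queue @tqp_vid using the HCLGE_OPC_SET_TQP_MAP command.
 */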
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
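/* hclge_assign_tqp: reserve up to @num_tqps unallocated queue pairs for
 * @vport and derive the vport's rss_size from what was actually assigned.
 */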
1370 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
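/* hclge_alloc_vport: allocate one vport for the PF plus one for each VMDq
 * vport and requested VF; TQPs are split evenly and the PF's vport gets
 * the remainder.
 */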
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is allocated in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs, which have a private buffer */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs, which have a private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
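/* hclge_is_rx_buf_ok: check whether @rx_all leaves enough room for the
 * shared rx buffer after the private buffers are taken out; if so, set up
 * the shared buffer size, its waterlines and the per-TC thresholds.
 */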
1665 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled TCs */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
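/* hclge_rx_buf_calc_all: compute the private rx buffer size and waterlines
 * for every enabled TC, using larger (@max) or smaller watermarks, then
 * check whether the shared buffer still fits.
 */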
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* let the last one be cleared first */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the private buffer of a TC without PFC */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* let the last one be cleared first */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Reduce the number of PFC TCs with a private buffer */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0: calculation successful, negative: fail
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Alloc private buffer TCs */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
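/* hclge_rx_priv_wl_config: program the per-TC private buffer waterlines;
 * the TCs are spread across two linked descriptors.
 */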
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
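/* hclge_buffer_alloc: calculate and program the whole packet buffer layout:
 * tx buffers, rx private buffers and, on DCB capable devices, the private
 * waterlines and common thresholds, followed by the common waterline.
 */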
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
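/* hclge_cfg_mac_speed_dup_hw: program the MAC speed and duplex; the speed
 * is written as an index (1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5,
 * 10M=6, 100M=7).
 */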
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "config mac speed dup failed, ret = %d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431                 schedule_work(&hdev->rst_service_task);
2432 }
2433
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2435 {
2436         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439                 (void)schedule_work(&hdev->service_task);
2440 }
2441
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2443 {
2444         struct hclge_link_status_cmd *req;
2445         struct hclge_desc desc;
2446         int link_status;
2447         int ret;
2448
2449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2451         if (ret) {
2452                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453                         ret);
2454                 return ret;
2455         }
2456
2457         req = (struct hclge_link_status_cmd *)desc.data;
2458         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2459
2460         return !!link_status;
2461 }
2462
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 {
2465         int mac_state;
2466         int link_stat;
2467
2468         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2469                 return 0;
2470
2471         mac_state = hclge_get_mac_link_status(hdev);
2472
2473         if (hdev->hw.mac.phydev) {
2474                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475                         link_stat = mac_state &
2476                                 hdev->hw.mac.phydev->link;
2477                 else
2478                         link_stat = 0;
2479
2480         } else {
2481                 link_stat = mac_state;
2482         }
2483
2484         return !!link_stat;
2485 }
2486
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2488 {
2489         struct hnae3_client *rclient = hdev->roce_client;
2490         struct hnae3_client *client = hdev->nic_client;
2491         struct hnae3_handle *rhandle;
2492         struct hnae3_handle *handle;
2493         int state;
2494         int i;
2495
2496         if (!client)
2497                 return;
2498         state = hclge_get_mac_phy_link(hdev);
2499         if (state != hdev->hw.mac.link) {
2500                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501                         handle = &hdev->vport[i].nic;
2502                         client->ops->link_status_change(handle, state);
2503                         hclge_config_mac_tnl_int(hdev, state);
2504                         rhandle = &hdev->vport[i].roce;
2505                         if (rclient && rclient->ops->link_status_change)
2506                                 rclient->ops->link_status_change(rhandle,
2507                                                                  state);
2508                 }
2509                 hdev->hw.mac.link = state;
2510         }
2511 }
2512
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2514 {
2515         /* update fec ability by speed */
2516         hclge_convert_setting_fec(mac);
2517
2518         /* firmware can not identify backplane type, the media type
2519          * read from the configuration can help handle it
2520          */
2521         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2526
2527         if (mac->support_autoneg) {
2528                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529                 linkmode_copy(mac->advertising, mac->supported);
2530         } else {
2531                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2532                                    mac->supported);
2533                 linkmode_zero(mac->advertising);
2534         }
2535 }
2536
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2538 {
2539         struct hclge_sfp_info_cmd *resp = NULL;
2540         struct hclge_desc desc;
2541         int ret;
2542
2543         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544         resp = (struct hclge_sfp_info_cmd *)desc.data;
2545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546         if (ret == -EOPNOTSUPP) {
2547                 dev_warn(&hdev->pdev->dev,
2548                          "IMP does not support getting SFP speed %d\n", ret);
2549                 return ret;
2550         } else if (ret) {
2551                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2552                 return ret;
2553         }
2554
2555         *speed = le32_to_cpu(resp->speed);
2556
2557         return 0;
2558 }
2559
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2561 {
2562         struct hclge_sfp_info_cmd *resp;
2563         struct hclge_desc desc;
2564         int ret;
2565
2566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567         resp = (struct hclge_sfp_info_cmd *)desc.data;
2568
2569         resp->query_type = QUERY_ACTIVE_SPEED;
2570
2571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572         if (ret == -EOPNOTSUPP) {
2573                 dev_warn(&hdev->pdev->dev,
2574                          "IMP does not support getting SFP info %d\n", ret);
2575                 return ret;
2576         } else if (ret) {
2577                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2578                 return ret;
2579         }
2580
2581         mac->speed = le32_to_cpu(resp->speed);
2582         /* if resp->speed_ability is 0, it means the firmware is an old
2583          * version, so do not update these params
2584          */
2585         if (resp->speed_ability) {
2586                 mac->module_type = le32_to_cpu(resp->module_type);
2587                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588                 mac->autoneg = resp->autoneg;
2589                 mac->support_autoneg = resp->autoneg_ability;
2590                 if (!resp->active_fec)
2591                         mac->fec_mode = 0;
2592                 else
2593                         mac->fec_mode = BIT(resp->active_fec);
2594         } else {
2595                 mac->speed_type = QUERY_SFP_SPEED;
2596         }
2597
2598         return 0;
2599 }
2600
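/* hclge_update_port_info: refresh speed/autoneg/FEC information for
 * non-copper ports; devices with pci revision >= 0x21 query the full SFP
 * info, older ones only the SFP speed.
 */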
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2602 {
2603         struct hclge_mac *mac = &hdev->hw.mac;
2604         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2605         int ret;
2606
2607         /* get the port info from SFP cmd if not copper port */
2608         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2609                 return 0;
2610
2611         /* if IMP does not support get SFP/qSFP info, return directly */
2612         if (!hdev->support_sfp_query)
2613                 return 0;
2614
2615         if (hdev->pdev->revision >= 0x21)
2616                 ret = hclge_get_sfp_info(hdev, mac);
2617         else
2618                 ret = hclge_get_sfp_speed(hdev, &speed);
2619
2620         if (ret == -EOPNOTSUPP) {
2621                 hdev->support_sfp_query = false;
2622                 return ret;
2623         } else if (ret) {
2624                 return ret;
2625         }
2626
2627         if (hdev->pdev->revision >= 0x21) {
2628                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629                         hclge_update_port_capability(mac);
2630                         return 0;
2631                 }
2632                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2633                                                HCLGE_MAC_FULL);
2634         } else {
2635                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636                         return 0; /* do nothing if no SFP */
2637
2638                 /* must config full duplex for SFP */
2639                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2640         }
2641 }
2642
2643 static int hclge_get_status(struct hnae3_handle *handle)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         hclge_update_link_status(hdev);
2649
2650         return hdev->hw.mac.link;
2651 }
2652
2653 static void hclge_service_timer(struct timer_list *t)
2654 {
2655         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2656
2657         mod_timer(&hdev->service_timer, jiffies + HZ);
2658         hdev->hw_stats.stats_timer++;
2659         hdev->fd_arfs_expire_timer++;
2660         hclge_task_schedule(hdev);
2661 }
2662
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2664 {
2665         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2666
2667         /* Flush memory before next watchdog */
2668         smp_mb__before_atomic();
2669         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2670 }
2671
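/* hclge_check_event_cause: read the vector0 status registers and classify
 * the interrupt as a reset, MSI-X error, mailbox or other event; @clearval
 * returns the bits the caller needs to clear for reset and mailbox events.
 */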
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2673 {
2674         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2675
2676         /* fetch the events from their corresponding regs */
2677         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679         msix_src_reg = hclge_read_dev(&hdev->hw,
2680                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2681
2682         /* Assumption: If by any chance reset and mailbox events are reported
2683          * together then we will only process the reset event in this go and
2684          * defer the processing of the mailbox events. Since we would not have
2685          * cleared the RX CMDQ event this time, we would receive another
2686          * interrupt from H/W just for the mailbox.
2687          */
2688
2689         /* check for vector0 reset event sources */
2690         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695                 hdev->rst_stats.imp_rst_cnt++;
2696                 return HCLGE_VECTOR0_EVENT_RST;
2697         }
2698
2699         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704                 hdev->rst_stats.global_rst_cnt++;
2705                 return HCLGE_VECTOR0_EVENT_RST;
2706         }
2707
2708         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713                 hdev->rst_stats.core_rst_cnt++;
2714                 return HCLGE_VECTOR0_EVENT_RST;
2715         }
2716
2717         /* check for vector0 msix event source */
2718         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2720                         msix_src_reg);
2721                 return HCLGE_VECTOR0_EVENT_ERR;
2722         }
2723
2724         /* check for vector0 mailbox(=CMDQ RX) event source */
2725         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727                 *clearval = cmdq_src_reg;
2728                 return HCLGE_VECTOR0_EVENT_MBX;
2729         }
2730
2731         /* print other vector0 event source */
2732         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733                 cmdq_src_reg, msix_src_reg);
2734         return HCLGE_VECTOR0_EVENT_OTHER;
2735 }
2736
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2738                                     u32 regclr)
2739 {
2740         switch (event_type) {
2741         case HCLGE_VECTOR0_EVENT_RST:
2742                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2743                 break;
2744         case HCLGE_VECTOR0_EVENT_MBX:
2745                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2746                 break;
2747         default:
2748                 break;
2749         }
2750 }
2751
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2753 {
2754         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2759 }
2760
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2762 {
2763         writel(enable ? 1 : 0, vector->addr);
2764 }
2765
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2767 {
2768         struct hclge_dev *hdev = data;
2769         u32 event_cause;
2770         u32 clearval;
2771
2772         hclge_enable_vector(&hdev->misc_vector, false);
2773         event_cause = hclge_check_event_cause(hdev, &clearval);
2774
2775         /* vector 0 interrupt is shared with reset and mailbox source events. */
2776         switch (event_cause) {
2777         case HCLGE_VECTOR0_EVENT_ERR:
2778                 /* We do not know what type of reset is required now. This can
2779                  * only be decided after we fetch the type of errors which
2780                  * caused this event. Therefore, we will do the following for now:
2781                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2782                  *    actual type of reset to be used is deferred.
2783                  * 2. Schedule the reset service task.
2784                  * 3. When the service task receives HNAE3_UNKNOWN_RESET it
2785                  *    will fetch the correct type of reset. This is done by
2786                  *    first decoding the types of errors.
2787                  */
2788                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2789                 /* fall through */
2790         case HCLGE_VECTOR0_EVENT_RST:
2791                 hclge_reset_task_schedule(hdev);
2792                 break;
2793         case HCLGE_VECTOR0_EVENT_MBX:
2794                 /* If we are here then either:
2795                  * 1. we are not handling any mbx task and none is
2796                  *    scheduled either,
2797                  *                        OR
2798                  * 2. we could be handling an mbx task but nothing more is
2799                  *    scheduled.
2800                  * In both cases, we should schedule the mbx task as there are
2801                  * more mbx messages reported by this interrupt.
2802                  */
2803                 hclge_mbx_task_schedule(hdev);
2804                 break;
2805         default:
2806                 dev_warn(&hdev->pdev->dev,
2807                          "received unknown or unhandled event of vector0\n");
2808                 break;
2809         }
2810
2811         /* clear the interrupt source if it is not caused by reset */
2812         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813                 hclge_clear_event_cause(hdev, event_cause, clearval);
2814                 hclge_enable_vector(&hdev->misc_vector, true);
2815         }
2816
2817         return IRQ_HANDLED;
2818 }
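
/* Note on the mask/clear discipline above: the misc vector is masked at the
 * top of hclge_misc_irq_handle() and is only unmasked inline for mailbox
 * events, after their cause bit has been cleared. For reset and MSI-X error
 * events the cause bit is intentionally left set here; it is cleared (and
 * the vector re-enabled) later from process context, see
 * hclge_clear_reset_cause() and hclge_get_reset_level().
 */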
2819
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2821 {
2822         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "vector(vector_id %d) has been freed.\n", vector_id);
2825                 return;
2826         }
2827
2828         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829         hdev->num_msi_left += 1;
2830         hdev->num_msi_used -= 1;
2831 }
2832
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2834 {
2835         struct hclge_misc_vector *vector = &hdev->misc_vector;
2836
2837         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2838
2839         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840         hdev->vector_status[0] = 0;
2841
2842         hdev->num_msi_left -= 1;
2843         hdev->num_msi_used += 1;
2844 }
2845
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2847 {
2848         int ret;
2849
2850         hclge_get_misc_vector(hdev);
2851
2852         /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2853         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854                           0, "hclge_misc", hdev);
2855         if (ret) {
2856                 hclge_free_vector(hdev, 0);
2857                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858                         hdev->misc_vector.vector_irq);
2859         }
2860
2861         return ret;
2862 }
2863
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2865 {
2866         free_irq(hdev->misc_vector.vector_irq, hdev);
2867         hclge_free_vector(hdev, 0);
2868 }
2869
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871                         enum hnae3_reset_notify_type type)
2872 {
2873         struct hnae3_client *client = hdev->nic_client;
2874         u16 i;
2875
2876         if (!client->ops->reset_notify)
2877                 return -EOPNOTSUPP;
2878
2879         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2880                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2881                 int ret;
2882
2883                 ret = client->ops->reset_notify(handle, type);
2884                 if (ret) {
2885                         dev_err(&hdev->pdev->dev,
2886                                 "notify nic client failed %d(%d)\n", type, ret);
2887                         return ret;
2888                 }
2889         }
2890
2891         return 0;
2892 }
2893
2894 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2895                                     enum hnae3_reset_notify_type type)
2896 {
2897         struct hnae3_client *client = hdev->roce_client;
2898         int ret = 0;
2899         u16 i;
2900
2901         if (!client)
2902                 return 0;
2903
2904         if (!client->ops->reset_notify)
2905                 return -EOPNOTSUPP;
2906
2907         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2908                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2909
2910                 ret = client->ops->reset_notify(handle, type);
2911                 if (ret) {
2912                         dev_err(&hdev->pdev->dev,
2913                                 "notify roce client failed %d(%d)",
2914                                 type, ret);
2915                         return ret;
2916                 }
2917         }
2918
2919         return ret;
2920 }
2921
2922 static int hclge_reset_wait(struct hclge_dev *hdev)
2923 {
2924 #define HCLGE_RESET_WATI_MS     100
2925 #define HCLGE_RESET_WAIT_CNT    200
2926         u32 val, reg, reg_bit;
2927         u32 cnt = 0;
2928
2929         switch (hdev->reset_type) {
2930         case HNAE3_IMP_RESET:
2931                 reg = HCLGE_GLOBAL_RESET_REG;
2932                 reg_bit = HCLGE_IMP_RESET_BIT;
2933                 break;
2934         case HNAE3_GLOBAL_RESET:
2935                 reg = HCLGE_GLOBAL_RESET_REG;
2936                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2937                 break;
2938         case HNAE3_CORE_RESET:
2939                 reg = HCLGE_GLOBAL_RESET_REG;
2940                 reg_bit = HCLGE_CORE_RESET_BIT;
2941                 break;
2942         case HNAE3_FUNC_RESET:
2943                 reg = HCLGE_FUN_RST_ING;
2944                 reg_bit = HCLGE_FUN_RST_ING_B;
2945                 break;
2946         case HNAE3_FLR_RESET:
2947                 break;
2948         default:
2949                 dev_err(&hdev->pdev->dev,
2950                         "Wait for unsupported reset type: %d\n",
2951                         hdev->reset_type);
2952                 return -EINVAL;
2953         }
2954
2955         if (hdev->reset_type == HNAE3_FLR_RESET) {
2956                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2957                        cnt++ < HCLGE_RESET_WAIT_CNT)
2958                         msleep(HCLGE_RESET_WATI_MS);
2959
2960                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2961                         dev_err(&hdev->pdev->dev,
2962                                 "flr wait timeout: %d\n", cnt);
2963                         return -EBUSY;
2964                 }
2965
2966                 return 0;
2967         }
2968
2969         val = hclge_read_dev(&hdev->hw, reg);
2970         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2971                 msleep(HCLGE_RESET_WATI_MS);
2972                 val = hclge_read_dev(&hdev->hw, reg);
2973                 cnt++;
2974         }
2975
2976         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2977                 dev_warn(&hdev->pdev->dev,
2978                          "Wait for reset timeout: %d\n", hdev->reset_type);
2979                 return -EBUSY;
2980         }
2981
2982         return 0;
2983 }
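
/* A rough worked example of the timeout above: with HCLGE_RESET_WATI_MS = 100
 * and HCLGE_RESET_WAIT_CNT = 200, hclge_reset_wait() polls the reset status
 * about every 100 ms and gives up after roughly 100 ms * 200 = 20 seconds,
 * returning -EBUSY so that hclge_reset_err_handle() can decide whether to
 * re-schedule the wait or escalate the reset level.
 */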
2984
2985 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2986 {
2987         struct hclge_vf_rst_cmd *req;
2988         struct hclge_desc desc;
2989
2990         req = (struct hclge_vf_rst_cmd *)desc.data;
2991         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2992         req->dest_vfid = func_id;
2993
2994         if (reset)
2995                 req->vf_rst = 0x1;
2996
2997         return hclge_cmd_send(&hdev->hw, &desc, 1);
2998 }
2999
3000 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3001 {
3002         int i;
3003
3004         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3005                 struct hclge_vport *vport = &hdev->vport[i];
3006                 int ret;
3007
3008                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3009                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3010                 if (ret) {
3011                         dev_err(&hdev->pdev->dev,
3012                                 "set vf(%d) rst failed %d!\n",
3013                                 vport->vport_id, ret);
3014                         return ret;
3015                 }
3016
3017                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3018                         continue;
3019
3020                 /* Inform VF to process the reset.
3021                  * hclge_inform_reset_assert_to_vf may fail if VF
3022                  * driver is not loaded.
3023                  */
3024                 ret = hclge_inform_reset_assert_to_vf(vport);
3025                 if (ret)
3026                         dev_warn(&hdev->pdev->dev,
3027                                  "inform reset to vf(%d) failed %d!\n",
3028                                  vport->vport_id, ret);
3029         }
3030
3031         return 0;
3032 }
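
/* The loop above starts at num_vmdq_vport + 1 on purpose: the first
 * num_vmdq_vport + 1 entries of hdev->vport belong to the PF itself
 * (including any VMDq instances), so only the remaining entries represent
 * VFs whose FUNC_RST_ING state needs to be set or cleared here.
 */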
3033
3034 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3035 {
3036         struct hclge_desc desc;
3037         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3038         int ret;
3039
3040         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3041         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3042         req->fun_reset_vfid = func_id;
3043
3044         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3045         if (ret)
3046                 dev_err(&hdev->pdev->dev,
3047                         "send function reset cmd fail, status =%d\n", ret);
3048
3049         return ret;
3050 }
3051
3052 static void hclge_do_reset(struct hclge_dev *hdev)
3053 {
3054         struct hnae3_handle *handle = &hdev->vport[0].nic;
3055         struct pci_dev *pdev = hdev->pdev;
3056         u32 val;
3057
3058         if (hclge_get_hw_reset_stat(handle)) {
3059                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3060                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3061                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3062                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3063                 return;
3064         }
3065
3066         switch (hdev->reset_type) {
3067         case HNAE3_GLOBAL_RESET:
3068                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3069                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3070                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3071                 dev_info(&pdev->dev, "Global Reset requested\n");
3072                 break;
3073         case HNAE3_CORE_RESET:
3074                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3076                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077                 dev_info(&pdev->dev, "Core Reset requested\n");
3078                 break;
3079         case HNAE3_FUNC_RESET:
3080                 dev_info(&pdev->dev, "PF Reset requested\n");
3081                 /* schedule again to check later */
3082                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3083                 hclge_reset_task_schedule(hdev);
3084                 break;
3085         case HNAE3_FLR_RESET:
3086                 dev_info(&pdev->dev, "FLR requested\n");
3087                 /* schedule again to check later */
3088                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3089                 hclge_reset_task_schedule(hdev);
3090                 break;
3091         default:
3092                 dev_warn(&pdev->dev,
3093                          "Unsupported reset type: %d\n", hdev->reset_type);
3094                 break;
3095         }
3096 }
3097
3098 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3099                                                    unsigned long *addr)
3100 {
3101         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3102
3103         /* first, resolve any unknown reset type to the known type(s) */
3104         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3105                 /* we will intentionally ignore any errors from this function
3106                  * as we will end up in *some* reset request in any case
3107                  */
3108                 hclge_handle_hw_msix_error(hdev, addr);
3109                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3110                 /* We deferred the clearing of the error event which caused
3111                  * the interrupt since it was not possible to do that in
3112                  * interrupt context (and this is the reason we introduced
3113                  * the new UNKNOWN reset type). Now that the errors have been
3114                  * handled and cleared in hardware, we can safely enable
3115                  * interrupts. This is an exception to the norm.
3116                  */
3117                 hclge_enable_vector(&hdev->misc_vector, true);
3118         }
3119
3120         /* return the highest priority reset level amongst all */
3121         if (test_bit(HNAE3_IMP_RESET, addr)) {
3122                 rst_level = HNAE3_IMP_RESET;
3123                 clear_bit(HNAE3_IMP_RESET, addr);
3124                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3125                 clear_bit(HNAE3_CORE_RESET, addr);
3126                 clear_bit(HNAE3_FUNC_RESET, addr);
3127         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3128                 rst_level = HNAE3_GLOBAL_RESET;
3129                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130                 clear_bit(HNAE3_CORE_RESET, addr);
3131                 clear_bit(HNAE3_FUNC_RESET, addr);
3132         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3133                 rst_level = HNAE3_CORE_RESET;
3134                 clear_bit(HNAE3_CORE_RESET, addr);
3135                 clear_bit(HNAE3_FUNC_RESET, addr);
3136         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3137                 rst_level = HNAE3_FUNC_RESET;
3138                 clear_bit(HNAE3_FUNC_RESET, addr);
3139         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3140                 rst_level = HNAE3_FLR_RESET;
3141                 clear_bit(HNAE3_FLR_RESET, addr);
3142         }
3143
3144         if (hdev->reset_type != HNAE3_NONE_RESET &&
3145             rst_level < hdev->reset_type)
3146                 return HNAE3_NONE_RESET;
3147
3148         return rst_level;
3149 }
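
/* hclge_get_reset_level() resolves pending requests strictly by severity:
 * IMP > GLOBAL > CORE > FUNC > FLR. Picking a level also clears the requests
 * it supersedes (e.g. an IMP reset clears any pending global/core/function
 * requests), since a bigger reset implicitly covers the smaller ones. The
 * final check relies on the hnae3_reset_type enum being ordered by severity:
 * if a higher-level reset is already in progress (hdev->reset_type), the
 * lower request resolves to HNAE3_NONE_RESET because the ongoing reset
 * already covers it.
 */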
3150
3151 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3152 {
3153         u32 clearval = 0;
3154
3155         switch (hdev->reset_type) {
3156         case HNAE3_IMP_RESET:
3157                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3158                 break;
3159         case HNAE3_GLOBAL_RESET:
3160                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3161                 break;
3162         case HNAE3_CORE_RESET:
3163                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3164                 break;
3165         default:
3166                 break;
3167         }
3168
3169         if (!clearval)
3170                 return;
3171
3172         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3173         hclge_enable_vector(&hdev->misc_vector, true);
3174 }
3175
3176 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3177 {
3178         int ret = 0;
3179
3180         switch (hdev->reset_type) {
3181         case HNAE3_FUNC_RESET:
3182                 /* fall through */
3183         case HNAE3_FLR_RESET:
3184                 ret = hclge_set_all_vf_rst(hdev, true);
3185                 break;
3186         default:
3187                 break;
3188         }
3189
3190         return ret;
3191 }
3192
3193 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3194 {
3195         u32 reg_val;
3196         int ret = 0;
3197
3198         switch (hdev->reset_type) {
3199         case HNAE3_FUNC_RESET:
3200                 /* There is no mechanism for the PF to know if the VF has
3201                  * stopped IO, so for now just wait 100 ms for the VF to stop IO
3202                  */
3203                 msleep(100);
3204                 ret = hclge_func_reset_cmd(hdev, 0);
3205                 if (ret) {
3206                         dev_err(&hdev->pdev->dev,
3207                                 "asserting function reset fail %d!\n", ret);
3208                         return ret;
3209                 }
3210
3211                 /* After performing PF reset, it is not necessary to do the
3212                  * mailbox handling or send any command to firmware, because
3213                  * any mailbox handling or command to firmware is only valid
3214                  * after hclge_cmd_init is called.
3215                  */
3216                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3217                 hdev->rst_stats.pf_rst_cnt++;
3218                 break;
3219         case HNAE3_FLR_RESET:
3220                 /* There is no mechanism for the PF to know if the VF has
3221                  * stopped IO, so for now just wait 100 ms for the VF to stop IO
3222                  */
3223                 msleep(100);
3224                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3226                 hdev->rst_stats.flr_rst_cnt++;
3227                 break;
3228         case HNAE3_IMP_RESET:
3229                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3230                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3231                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3232                 break;
3233         default:
3234                 break;
3235         }
3236
3237         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3238
3239         return ret;
3240 }
3241
3242 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3243 {
3244 #define MAX_RESET_FAIL_CNT 5
3245 #define RESET_UPGRADE_DELAY_SEC 10
3246
3247         if (hdev->reset_pending) {
3248                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3249                          hdev->reset_pending);
3250                 return true;
3251         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3252                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3253                     BIT(HCLGE_IMP_RESET_BIT))) {
3254                 dev_info(&hdev->pdev->dev,
3255                          "reset failed because IMP Reset is pending\n");
3256                 hclge_clear_reset_cause(hdev);
3257                 return false;
3258         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3259                 hdev->reset_fail_cnt++;
3260                 if (is_timeout) {
3261                         set_bit(hdev->reset_type, &hdev->reset_pending);
3262                         dev_info(&hdev->pdev->dev,
3263                                  "re-schedule to wait for hw reset done\n");
3264                         return true;
3265                 }
3266
3267                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3268                 hclge_clear_reset_cause(hdev);
3269                 mod_timer(&hdev->reset_timer,
3270                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3271
3272                 return false;
3273         }
3274
3275         hclge_clear_reset_cause(hdev);
3276         dev_err(&hdev->pdev->dev, "Reset failed!\n");
3277         return false;
3278 }
3279
3280 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3281 {
3282         int ret = 0;
3283
3284         switch (hdev->reset_type) {
3285         case HNAE3_FUNC_RESET:
3286                 /* fall through */
3287         case HNAE3_FLR_RESET:
3288                 ret = hclge_set_all_vf_rst(hdev, false);
3289                 break;
3290         default:
3291                 break;
3292         }
3293
3294         return ret;
3295 }
3296
3297 static void hclge_reset(struct hclge_dev *hdev)
3298 {
3299         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3300         bool is_timeout = false;
3301         int ret;
3302
3303         /* Initialize ae_dev reset status as well, in case enet layer wants to
3304          * know if device is undergoing reset
3305          */
3306         ae_dev->reset_type = hdev->reset_type;
3307         hdev->rst_stats.reset_cnt++;
3308         /* perform reset of the stack & ae device for a client */
3309         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3310         if (ret)
3311                 goto err_reset;
3312
3313         ret = hclge_reset_prepare_down(hdev);
3314         if (ret)
3315                 goto err_reset;
3316
3317         rtnl_lock();
3318         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3319         if (ret)
3320                 goto err_reset_lock;
3321
3322         rtnl_unlock();
3323
3324         ret = hclge_reset_prepare_wait(hdev);
3325         if (ret)
3326                 goto err_reset;
3327
3328         if (hclge_reset_wait(hdev)) {
3329                 is_timeout = true;
3330                 goto err_reset;
3331         }
3332
3333         hdev->rst_stats.hw_reset_done_cnt++;
3334
3335         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3336         if (ret)
3337                 goto err_reset;
3338
3339         rtnl_lock();
3340         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3341         if (ret)
3342                 goto err_reset_lock;
3343
3344         ret = hclge_reset_ae_dev(hdev->ae_dev);
3345         if (ret)
3346                 goto err_reset_lock;
3347
3348         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3349         if (ret)
3350                 goto err_reset_lock;
3351
3352         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3353         if (ret)
3354                 goto err_reset_lock;
3355
3356         hclge_clear_reset_cause(hdev);
3357
3358         ret = hclge_reset_prepare_up(hdev);
3359         if (ret)
3360                 goto err_reset_lock;
3361
3362         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3363         if (ret)
3364                 goto err_reset_lock;
3365
3366         rtnl_unlock();
3367
3368         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3369         if (ret)
3370                 goto err_reset;
3371
3372         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3373         if (ret)
3374                 goto err_reset;
3375
3376         hdev->last_reset_time = jiffies;
3377         hdev->reset_fail_cnt = 0;
3378         hdev->rst_stats.reset_done_cnt++;
3379         ae_dev->reset_type = HNAE3_NONE_RESET;
3380         del_timer(&hdev->reset_timer);
3381
3382         return;
3383
3384 err_reset_lock:
3385         rtnl_unlock();
3386 err_reset:
3387         if (hclge_reset_err_handle(hdev, is_timeout))
3388                 hclge_reset_task_schedule(hdev);
3389 }
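
/* The overall flow in hclge_reset() is therefore:
 *   roce DOWN -> prepare_down (assert VF reset) -> nic DOWN (under rtnl) ->
 *   prepare_wait (assert HW/FW reset) -> wait for HW -> roce UNINIT ->
 *   nic UNINIT -> hclge_reset_ae_dev() -> nic INIT -> nic RESTORE ->
 *   clear reset cause -> prepare_up (de-assert VF reset) -> nic UP ->
 *   roce INIT -> roce UP.
 * Any failure funnels into hclge_reset_err_handle(), which either
 * re-schedules the reset task or arms the reset timer to upgrade the level.
 */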
3390
3391 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3392 {
3393         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3394         struct hclge_dev *hdev = ae_dev->priv;
3395
3396         /* We might end up getting called broadly because of 2 cases below:
3397          * 1. A recoverable error was conveyed through APEI and the only way
3398          *    to bring back normalcy is to reset.
3399          * 2. A new reset request from the stack due to timeout
3400          *
3401          * For the first case, the error event might not have an ae handle
3402          * available. Check if this is a new reset request and we are not here
3403          * just because the last reset attempt did not succeed and the watchdog
3404          * hit us again. We know this if the last reset request did not occur
3405          * very recently (watchdog timer = 5*HZ, so check after a sufficiently
3406          * large time, say 4*5*HZ). In case of a new request we reset the
3407          * "reset level" to PF reset. And if it is a repeat of the most recent
3408          * reset request then we want to make sure we throttle it. Therefore,
3409          * we will not allow another one before 3*HZ has elapsed.
3410          */
3411         if (!handle)
3412                 handle = &hdev->vport[0].nic;
3413
3414         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3415                 return;
3416         else if (hdev->default_reset_request)
3417                 hdev->reset_level =
3418                         hclge_get_reset_level(hdev,
3419                                               &hdev->default_reset_request);
3420         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3421                 hdev->reset_level = HNAE3_FUNC_RESET;
3422
3423         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3424                  hdev->reset_level);
3425
3426         /* request reset & schedule reset task */
3427         set_bit(hdev->reset_level, &hdev->reset_request);
3428         hclge_reset_task_schedule(hdev);
3429
3430         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3431                 hdev->reset_level++;
3432 }
3433
3434 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3435                                         enum hnae3_reset_type rst_type)
3436 {
3437         struct hclge_dev *hdev = ae_dev->priv;
3438
3439         set_bit(rst_type, &hdev->default_reset_request);
3440 }
3441
3442 static void hclge_reset_timer(struct timer_list *t)
3443 {
3444         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3445
3446         dev_info(&hdev->pdev->dev,
3447                  "triggering global reset in reset timer\n");
3448         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3449         hclge_reset_event(hdev->pdev, NULL);
3450 }
3451
3452 static void hclge_reset_subtask(struct hclge_dev *hdev)
3453 {
3454         /* check if there is any ongoing reset in the hardware. This status can
3455          * be checked from reset_pending. If there is, we need to wait for the
3456          * hardware to complete the reset.
3457          *    a. If we are able to figure out in reasonable time that the
3458          *       hardware has fully reset, we can proceed with driver and
3459          *       client reset.
3460          *    b. else, we can come back later to check this status, so
3461          *       re-schedule now.
3462          */
3463         hdev->last_reset_time = jiffies;
3464         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3465         if (hdev->reset_type != HNAE3_NONE_RESET)
3466                 hclge_reset(hdev);
3467
3468         /* check if we got any *new* reset requests to be honored */
3469         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3470         if (hdev->reset_type != HNAE3_NONE_RESET)
3471                 hclge_do_reset(hdev);
3472
3473         hdev->reset_type = HNAE3_NONE_RESET;
3474 }
3475
3476 static void hclge_reset_service_task(struct work_struct *work)
3477 {
3478         struct hclge_dev *hdev =
3479                 container_of(work, struct hclge_dev, rst_service_task);
3480
3481         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3482                 return;
3483
3484         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3485
3486         hclge_reset_subtask(hdev);
3487
3488         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3489 }
3490
3491 static void hclge_mailbox_service_task(struct work_struct *work)
3492 {
3493         struct hclge_dev *hdev =
3494                 container_of(work, struct hclge_dev, mbx_service_task);
3495
3496         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3497                 return;
3498
3499         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3500
3501         hclge_mbx_handler(hdev);
3502
3503         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3504 }
3505
3506 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3507 {
3508         int i;
3509
3510         /* start from vport 1 since the PF (vport 0) is always alive */
3511         for (i = 1; i < hdev->num_alloc_vport; i++) {
3512                 struct hclge_vport *vport = &hdev->vport[i];
3513
3514                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3515                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3516
3517                 /* If the VF is not alive, set its MPS to the default value */
3518                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3519                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3520         }
3521 }
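
/* A VF vport is considered dead if it has not refreshed last_active_jiffies
 * within the last 8 seconds (8 * HZ); its MPS is then forced back to
 * HCLGE_MAC_DEFAULT_FRAME. The keepalive refresh itself presumably arrives
 * through the mailbox path handled by hclge_mbx_handler().
 */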
3522
3523 static void hclge_service_task(struct work_struct *work)
3524 {
3525         struct hclge_dev *hdev =
3526                 container_of(work, struct hclge_dev, service_task);
3527
3528         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3529                 hclge_update_stats_for_all(hdev);
3530                 hdev->hw_stats.stats_timer = 0;
3531         }
3532
3533         hclge_update_port_info(hdev);
3534         hclge_update_link_status(hdev);
3535         hclge_update_vport_alive(hdev);
3536         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3537                 hclge_rfs_filter_expire(hdev);
3538                 hdev->fd_arfs_expire_timer = 0;
3539         }
3540         hclge_service_complete(hdev);
3541 }
3542
3543 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3544 {
3545         /* VF handle has no client */
3546         if (!handle->client)
3547                 return container_of(handle, struct hclge_vport, nic);
3548         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3549                 return container_of(handle, struct hclge_vport, roce);
3550         else
3551                 return container_of(handle, struct hclge_vport, nic);
3552 }
3553
3554 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3555                             struct hnae3_vector_info *vector_info)
3556 {
3557         struct hclge_vport *vport = hclge_get_vport(handle);
3558         struct hnae3_vector_info *vector = vector_info;
3559         struct hclge_dev *hdev = vport->back;
3560         int alloc = 0;
3561         int i, j;
3562
3563         vector_num = min(hdev->num_msi_left, vector_num);
3564
3565         for (j = 0; j < vector_num; j++) {
3566                 for (i = 1; i < hdev->num_msi; i++) {
3567                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3568                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3569                                 vector->io_addr = hdev->hw.io_base +
3570                                         HCLGE_VECTOR_REG_BASE +
3571                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3572                                         vport->vport_id *
3573                                         HCLGE_VECTOR_VF_OFFSET;
3574                                 hdev->vector_status[i] = vport->vport_id;
3575                                 hdev->vector_irq[i] = vector->vector;
3576
3577                                 vector++;
3578                                 alloc++;
3579
3580                                 break;
3581                         }
3582                 }
3583         }
3584         hdev->num_msi_left -= alloc;
3585         hdev->num_msi_used += alloc;
3586
3587         return alloc;
3588 }
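
/* Vector 0 is reserved for the misc interrupt (see hclge_get_misc_vector()),
 * so the search above starts at index 1. Each allocated ring vector gets an
 * io_addr computed from HCLGE_VECTOR_REG_BASE plus a per-vector and per-vport
 * offset, and num_msi_left/num_msi_used are adjusted so hclge_put_vector()
 * can later return the vector to the pool via hclge_free_vector().
 */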
3589
3590 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3591 {
3592         int i;
3593
3594         for (i = 0; i < hdev->num_msi; i++)
3595                 if (vector == hdev->vector_irq[i])
3596                         return i;
3597
3598         return -EINVAL;
3599 }
3600
3601 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3602 {
3603         struct hclge_vport *vport = hclge_get_vport(handle);
3604         struct hclge_dev *hdev = vport->back;
3605         int vector_id;
3606
3607         vector_id = hclge_get_vector_index(hdev, vector);
3608         if (vector_id < 0) {
3609                 dev_err(&hdev->pdev->dev,
3610                         "Get vector index fail. vector_id =%d\n", vector_id);
3611                 return vector_id;
3612         }
3613
3614         hclge_free_vector(hdev, vector_id);
3615
3616         return 0;
3617 }
3618
3619 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3620 {
3621         return HCLGE_RSS_KEY_SIZE;
3622 }
3623
3624 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3625 {
3626         return HCLGE_RSS_IND_TBL_SIZE;
3627 }
3628
3629 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3630                                   const u8 hfunc, const u8 *key)
3631 {
3632         struct hclge_rss_config_cmd *req;
3633         struct hclge_desc desc;
3634         int key_offset;
3635         int key_size;
3636         int ret;
3637
3638         req = (struct hclge_rss_config_cmd *)desc.data;
3639
3640         for (key_offset = 0; key_offset < 3; key_offset++) {
3641                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3642                                            false);
3643
3644                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3645                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3646
3647                 if (key_offset == 2)
3648                         key_size =
3649                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3650                 else
3651                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3652
3653                 memcpy(req->hash_key,
3654                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3655
3656                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3657                 if (ret) {
3658                         dev_err(&hdev->pdev->dev,
3659                                 "Configure RSS config fail, status = %d\n",
3660                                 ret);
3661                         return ret;
3662                 }
3663         }
3664         return 0;
3665 }
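
/* The hash key does not fit in a single descriptor, so it is programmed with
 * three HCLGE_OPC_RSS_GENERIC_CONFIG commands, each carrying its chunk index
 * in hash_config. Assuming the usual sizes for this hardware
 * (HCLGE_RSS_KEY_SIZE = 40 bytes, HCLGE_RSS_HASH_KEY_NUM = 16 bytes per
 * descriptor), the chunks are 16 + 16 + 8 bytes: the last command carries
 * only the remaining 40 - 2 * 16 = 8 bytes.
 */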
3666
3667 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3668 {
3669         struct hclge_rss_indirection_table_cmd *req;
3670         struct hclge_desc desc;
3671         int i, j;
3672         int ret;
3673
3674         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3675
3676         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3677                 hclge_cmd_setup_basic_desc
3678                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3679
3680                 req->start_table_index =
3681                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3682                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3683
3684                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3685                         req->rss_result[j] =
3686                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3687
3688                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689                 if (ret) {
3690                         dev_err(&hdev->pdev->dev,
3691                                 "Configure rss indir table fail,status = %d\n",
3692                                 ret);
3693                         return ret;
3694                 }
3695         }
3696         return 0;
3697 }
3698
3699 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3700                                  u16 *tc_size, u16 *tc_offset)
3701 {
3702         struct hclge_rss_tc_mode_cmd *req;
3703         struct hclge_desc desc;
3704         int ret;
3705         int i;
3706
3707         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3708         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3709
3710         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3711                 u16 mode = 0;
3712
3713                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3714                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3715                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3716                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3717                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3718
3719                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3720         }
3721
3722         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3723         if (ret)
3724                 dev_err(&hdev->pdev->dev,
3725                         "Configure rss tc mode fail, status = %d\n", ret);
3726
3727         return ret;
3728 }
3729
3730 static void hclge_get_rss_type(struct hclge_vport *vport)
3731 {
3732         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3733             vport->rss_tuple_sets.ipv4_udp_en ||
3734             vport->rss_tuple_sets.ipv4_sctp_en ||
3735             vport->rss_tuple_sets.ipv6_tcp_en ||
3736             vport->rss_tuple_sets.ipv6_udp_en ||
3737             vport->rss_tuple_sets.ipv6_sctp_en)
3738                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3739         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3740                  vport->rss_tuple_sets.ipv6_fragment_en)
3741                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3742         else
3743                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3744 }
3745
3746 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3747 {
3748         struct hclge_rss_input_tuple_cmd *req;
3749         struct hclge_desc desc;
3750         int ret;
3751
3752         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3753
3754         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3755
3756         /* Get the tuple cfg from the PF (vport 0) */
3757         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3758         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3759         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3760         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3761         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3762         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3763         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3764         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3765         hclge_get_rss_type(&hdev->vport[0]);
3766         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3767         if (ret)
3768                 dev_err(&hdev->pdev->dev,
3769                         "Configure rss input fail, status = %d\n", ret);
3770         return ret;
3771 }
3772
3773 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3774                          u8 *key, u8 *hfunc)
3775 {
3776         struct hclge_vport *vport = hclge_get_vport(handle);
3777         int i;
3778
3779         /* Get hash algorithm */
3780         if (hfunc) {
3781                 switch (vport->rss_algo) {
3782                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3783                         *hfunc = ETH_RSS_HASH_TOP;
3784                         break;
3785                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3786                         *hfunc = ETH_RSS_HASH_XOR;
3787                         break;
3788                 default:
3789                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3790                         break;
3791                 }
3792         }
3793
3794         /* Get the RSS Key required by the user */
3795         if (key)
3796                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3797
3798         /* Get indirect table */
3799         if (indir)
3800                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3801                         indir[i] =  vport->rss_indirection_tbl[i];
3802
3803         return 0;
3804 }
3805
3806 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3807                          const  u8 *key, const  u8 hfunc)
3808 {
3809         struct hclge_vport *vport = hclge_get_vport(handle);
3810         struct hclge_dev *hdev = vport->back;
3811         u8 hash_algo;
3812         int ret, i;
3813
3814         /* Set the RSS Hash Key if specified by the user */
3815         if (key) {
3816                 switch (hfunc) {
3817                 case ETH_RSS_HASH_TOP:
3818                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3819                         break;
3820                 case ETH_RSS_HASH_XOR:
3821                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3822                         break;
3823                 case ETH_RSS_HASH_NO_CHANGE:
3824                         hash_algo = vport->rss_algo;
3825                         break;
3826                 default:
3827                         return -EINVAL;
3828                 }
3829
3830                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3831                 if (ret)
3832                         return ret;
3833
3834                 /* Update the shadow RSS key with the key specified by the user */
3835                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3836                 vport->rss_algo = hash_algo;
3837         }
3838
3839         /* Update the shadow RSS table with user specified qids */
3840         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3841                 vport->rss_indirection_tbl[i] = indir[i];
3842
3843         /* Update the hardware */
3844         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3845 }
3846
3847 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3848 {
3849         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3850
3851         if (nfc->data & RXH_L4_B_2_3)
3852                 hash_sets |= HCLGE_D_PORT_BIT;
3853         else
3854                 hash_sets &= ~HCLGE_D_PORT_BIT;
3855
3856         if (nfc->data & RXH_IP_SRC)
3857                 hash_sets |= HCLGE_S_IP_BIT;
3858         else
3859                 hash_sets &= ~HCLGE_S_IP_BIT;
3860
3861         if (nfc->data & RXH_IP_DST)
3862                 hash_sets |= HCLGE_D_IP_BIT;
3863         else
3864                 hash_sets &= ~HCLGE_D_IP_BIT;
3865
3866         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3867                 hash_sets |= HCLGE_V_TAG_BIT;
3868
3869         return hash_sets;
3870 }
3871
3872 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3873                                struct ethtool_rxnfc *nfc)
3874 {
3875         struct hclge_vport *vport = hclge_get_vport(handle);
3876         struct hclge_dev *hdev = vport->back;
3877         struct hclge_rss_input_tuple_cmd *req;
3878         struct hclge_desc desc;
3879         u8 tuple_sets;
3880         int ret;
3881
3882         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3883                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3884                 return -EINVAL;
3885
3886         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3888
3889         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3890         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3891         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3892         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3893         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3894         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3895         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3896         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3897
3898         tuple_sets = hclge_get_rss_hash_bits(nfc);
3899         switch (nfc->flow_type) {
3900         case TCP_V4_FLOW:
3901                 req->ipv4_tcp_en = tuple_sets;
3902                 break;
3903         case TCP_V6_FLOW:
3904                 req->ipv6_tcp_en = tuple_sets;
3905                 break;
3906         case UDP_V4_FLOW:
3907                 req->ipv4_udp_en = tuple_sets;
3908                 break;
3909         case UDP_V6_FLOW:
3910                 req->ipv6_udp_en = tuple_sets;
3911                 break;
3912         case SCTP_V4_FLOW:
3913                 req->ipv4_sctp_en = tuple_sets;
3914                 break;
3915         case SCTP_V6_FLOW:
3916                 if ((nfc->data & RXH_L4_B_0_1) ||
3917                     (nfc->data & RXH_L4_B_2_3))
3918                         return -EINVAL;
3919
3920                 req->ipv6_sctp_en = tuple_sets;
3921                 break;
3922         case IPV4_FLOW:
3923                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3924                 break;
3925         case IPV6_FLOW:
3926                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3927                 break;
3928         default:
3929                 return -EINVAL;
3930         }
3931
3932         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3933         if (ret) {
3934                 dev_err(&hdev->pdev->dev,
3935                         "Set rss tuple fail, status = %d\n", ret);
3936                 return ret;
3937         }
3938
3939         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3940         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3941         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3942         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3943         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3944         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3945         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3946         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3947         hclge_get_rss_type(vport);
3948         return 0;
3949 }
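
/* Note that SCTP_V6_FLOW rejects RXH_L4_B_0_1/RXH_L4_B_2_3 above: the driver
 * does not support hashing on SCTP port numbers for IPv6 flows, so only the
 * IP source/destination bits may be enabled for that flow type.
 */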
3950
3951 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3952                                struct ethtool_rxnfc *nfc)
3953 {
3954         struct hclge_vport *vport = hclge_get_vport(handle);
3955         u8 tuple_sets;
3956
3957         nfc->data = 0;
3958
3959         switch (nfc->flow_type) {
3960         case TCP_V4_FLOW:
3961                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3962                 break;
3963         case UDP_V4_FLOW:
3964                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3965                 break;
3966         case TCP_V6_FLOW:
3967                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3968                 break;
3969         case UDP_V6_FLOW:
3970                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3971                 break;
3972         case SCTP_V4_FLOW:
3973                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3974                 break;
3975         case SCTP_V6_FLOW:
3976                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3977                 break;
3978         case IPV4_FLOW:
3979         case IPV6_FLOW:
3980                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3981                 break;
3982         default:
3983                 return -EINVAL;
3984         }
3985
3986         if (!tuple_sets)
3987                 return 0;
3988
3989         if (tuple_sets & HCLGE_D_PORT_BIT)
3990                 nfc->data |= RXH_L4_B_2_3;
3991         if (tuple_sets & HCLGE_S_PORT_BIT)
3992                 nfc->data |= RXH_L4_B_0_1;
3993         if (tuple_sets & HCLGE_D_IP_BIT)
3994                 nfc->data |= RXH_IP_DST;
3995         if (tuple_sets & HCLGE_S_IP_BIT)
3996                 nfc->data |= RXH_IP_SRC;
3997
3998         return 0;
3999 }
4000
4001 static int hclge_get_tc_size(struct hnae3_handle *handle)
4002 {
4003         struct hclge_vport *vport = hclge_get_vport(handle);
4004         struct hclge_dev *hdev = vport->back;
4005
4006         return hdev->rss_size_max;
4007 }
4008
4009 int hclge_rss_init_hw(struct hclge_dev *hdev)
4010 {
4011         struct hclge_vport *vport = hdev->vport;
4012         u8 *rss_indir = vport[0].rss_indirection_tbl;
4013         u16 rss_size = vport[0].alloc_rss_size;
4014         u8 *key = vport[0].rss_hash_key;
4015         u8 hfunc = vport[0].rss_algo;
4016         u16 tc_offset[HCLGE_MAX_TC_NUM];
4017         u16 tc_valid[HCLGE_MAX_TC_NUM];
4018         u16 tc_size[HCLGE_MAX_TC_NUM];
4019         u16 roundup_size;
4020         int i, ret;
4021
4022         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4023         if (ret)
4024                 return ret;
4025
4026         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4027         if (ret)
4028                 return ret;
4029
4030         ret = hclge_set_rss_input_tuple(hdev);
4031         if (ret)
4032                 return ret;
4033
4034         /* Each TC has the same queue size, and the tc_size set to hardware
4035          * is the log2 of the roundup power of two of rss_size; the actual
4036          * queue size is limited by the indirection table.
4037          */
4038         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4039                 dev_err(&hdev->pdev->dev,
4040                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4041                         rss_size);
4042                 return -EINVAL;
4043         }
4044
4045         roundup_size = roundup_pow_of_two(rss_size);
4046         roundup_size = ilog2(roundup_size);
4047
4048         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4049                 tc_valid[i] = 0;
4050
4051                 if (!(hdev->hw_tc_map & BIT(i)))
4052                         continue;
4053
4054                 tc_valid[i] = 1;
4055                 tc_size[i] = roundup_size;
4056                 tc_offset[i] = rss_size * i;
4057         }
4058
4059         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4060 }
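
/* tc_size is programmed as ilog2(roundup_pow_of_two(rss_size)). For example,
 * with rss_size = 24 queues per TC, roundup_pow_of_two(24) = 32 and
 * ilog2(32) = 5, so tc_size[i] = 5 for every enabled TC while tc_offset[i]
 * is simply rss_size * i. Queues beyond rss_size are still never hit,
 * because the indirection table (see hclge_rss_indir_init_cfg()) only maps
 * entries to indices modulo alloc_rss_size.
 */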
4061
4062 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4063 {
4064         struct hclge_vport *vport = hdev->vport;
4065         int i, j;
4066
4067         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4068                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4069                         vport[j].rss_indirection_tbl[i] =
4070                                 i % vport[j].alloc_rss_size;
4071         }
4072 }
4073
4074 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4075 {
4076         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4077         struct hclge_vport *vport = hdev->vport;
4078
4079         if (hdev->pdev->revision >= 0x21)
4080                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4081
4082         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4083                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4084                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4085                 vport[i].rss_tuple_sets.ipv4_udp_en =
4086                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4087                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4088                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4089                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4090                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4091                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4092                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4093                 vport[i].rss_tuple_sets.ipv6_udp_en =
4094                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4095                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4096                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4097                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4098                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4099
4100                 vport[i].rss_algo = rss_algo;
4101
4102                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4103                        HCLGE_RSS_KEY_SIZE);
4104         }
4105
4106         hclge_rss_indir_init_cfg(hdev);
4107 }
4108
4109 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4110                                 int vector_id, bool en,
4111                                 struct hnae3_ring_chain_node *ring_chain)
4112 {
4113         struct hclge_dev *hdev = vport->back;
4114         struct hnae3_ring_chain_node *node;
4115         struct hclge_desc desc;
4116         struct hclge_ctrl_vector_chain_cmd *req
4117                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4118         enum hclge_cmd_status status;
4119         enum hclge_opcode_type op;
4120         u16 tqp_type_and_id;
4121         int i;
4122
4123         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4124         hclge_cmd_setup_basic_desc(&desc, op, false);
4125         req->int_vector_id = vector_id;
4126
4127         i = 0;
4128         for (node = ring_chain; node; node = node->next) {
4129                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4130                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4131                                 HCLGE_INT_TYPE_S,
4132                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4133                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4134                                 HCLGE_TQP_ID_S, node->tqp_index);
4135                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4136                                 HCLGE_INT_GL_IDX_S,
4137                                 hnae3_get_field(node->int_gl_idx,
4138                                                 HNAE3_RING_GL_IDX_M,
4139                                                 HNAE3_RING_GL_IDX_S));
4140                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4141                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4142                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4143                         req->vfid = vport->vport_id;
4144
4145                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4146                         if (status) {
4147                                 dev_err(&hdev->pdev->dev,
4148                                         "Map TQP fail, status is %d.\n",
4149                                         status);
4150                                 return -EIO;
4151                         }
4152                         i = 0;
4153
4154                         hclge_cmd_setup_basic_desc(&desc,
4155                                                    op,
4156                                                    false);
4157                         req->int_vector_id = vector_id;
4158                 }
4159         }
4160
4161         if (i > 0) {
4162                 req->int_cause_num = i;
4163                 req->vfid = vport->vport_id;
4164                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4165                 if (status) {
4166                         dev_err(&hdev->pdev->dev,
4167                                 "Map TQP fail, status is %d.\n", status);
4168                         return -EIO;
4169                 }
4170         }
4171
4172         return 0;
4173 }
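
/* A ring chain longer than HCLGE_VECTOR_ELEMENTS_PER_CMD entries is split
 * across several ADD/DEL_RING_TO_VECTOR descriptors: whenever a descriptor
 * fills up it is sent and a fresh one is prepared for the same vector, and
 * any remainder (i > 0) is flushed after the loop.
 */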
4174
4175 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4176                                     int vector,
4177                                     struct hnae3_ring_chain_node *ring_chain)
4178 {
4179         struct hclge_vport *vport = hclge_get_vport(handle);
4180         struct hclge_dev *hdev = vport->back;
4181         int vector_id;
4182
4183         vector_id = hclge_get_vector_index(hdev, vector);
4184         if (vector_id < 0) {
4185                 dev_err(&hdev->pdev->dev,
4186                         "Get vector index fail. vector_id =%d\n", vector_id);
4187                 return vector_id;
4188         }
4189
4190         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4191 }
4192
4193 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4194                                        int vector,
4195                                        struct hnae3_ring_chain_node *ring_chain)
4196 {
4197         struct hclge_vport *vport = hclge_get_vport(handle);
4198         struct hclge_dev *hdev = vport->back;
4199         int vector_id, ret;
4200
4201         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4202                 return 0;
4203
4204         vector_id = hclge_get_vector_index(hdev, vector);
4205         if (vector_id < 0) {
4206                 dev_err(&handle->pdev->dev,
4207                         "Get vector index fail. ret =%d\n", vector_id);
4208                 return vector_id;
4209         }
4210
4211         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4212         if (ret)
4213                 dev_err(&handle->pdev->dev,
4214                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4215                         vector_id,
4216                         ret);
4217
4218         return ret;
4219 }
4220
4221 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4222                                struct hclge_promisc_param *param)
4223 {
4224         struct hclge_promisc_cfg_cmd *req;
4225         struct hclge_desc desc;
4226         int ret;
4227
4228         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4229
4230         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4231         req->vf_id = param->vf_id;
4232
4233         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4234          * on pdev revision 0x20; newer revisions support them. Setting
4235          * these two fields does not cause an error when the driver sends
4236          * the command to the firmware on revision 0x20.
4237          */
4238         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4239                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4240
4241         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4242         if (ret)
4243                 dev_err(&hdev->pdev->dev,
4244                         "Set promisc mode fail, status is %d.\n", ret);
4245
4246         return ret;
4247 }
4248
4249 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4250                               bool en_mc, bool en_bc, int vport_id)
4251 {
4252         if (!param)
4253                 return;
4254
4255         memset(param, 0, sizeof(struct hclge_promisc_param));
4256         if (en_uc)
4257                 param->enable = HCLGE_PROMISC_EN_UC;
4258         if (en_mc)
4259                 param->enable |= HCLGE_PROMISC_EN_MC;
4260         if (en_bc)
4261                 param->enable |= HCLGE_PROMISC_EN_BC;
4262         param->vf_id = vport_id;
4263 }
4264
4265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4266                                   bool en_mc_pmc)
4267 {
4268         struct hclge_vport *vport = hclge_get_vport(handle);
4269         struct hclge_dev *hdev = vport->back;
4270         struct hclge_promisc_param param;
4271         bool en_bc_pmc = true;
4272
4273         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4274          * filter is always bypassed. So broadcast promisc should be
4275          * disabled until the user enables promisc mode.
4276          */
4277         if (handle->pdev->revision == 0x20)
4278                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4279
4280         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4281                                  vport->vport_id);
4282         return hclge_cmd_set_promisc_mode(hdev, &param);
4283 }
4284
4285 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4286 {
4287         struct hclge_get_fd_mode_cmd *req;
4288         struct hclge_desc desc;
4289         int ret;
4290
4291         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4292
4293         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4294
4295         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4296         if (ret) {
4297                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4298                 return ret;
4299         }
4300
4301         *fd_mode = req->mode;
4302
4303         return ret;
4304 }
4305
4306 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4307                                    u32 *stage1_entry_num,
4308                                    u32 *stage2_entry_num,
4309                                    u16 *stage1_counter_num,
4310                                    u16 *stage2_counter_num)
4311 {
4312         struct hclge_get_fd_allocation_cmd *req;
4313         struct hclge_desc desc;
4314         int ret;
4315
4316         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4317
4318         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4319
4320         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4321         if (ret) {
4322                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4323                         ret);
4324                 return ret;
4325         }
4326
4327         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4328         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4329         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4330         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4331
4332         return ret;
4333 }
4334
4335 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4336 {
4337         struct hclge_set_fd_key_config_cmd *req;
4338         struct hclge_fd_key_cfg *stage;
4339         struct hclge_desc desc;
4340         int ret;
4341
4342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4343
4344         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4345         stage = &hdev->fd_cfg.key_cfg[stage_num];
4346         req->stage = stage_num;
4347         req->key_select = stage->key_sel;
4348         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4349         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4350         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4351         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4352         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4353         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4354
4355         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4356         if (ret)
4357                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4358
4359         return ret;
4360 }
4361
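/* Initialize the flow director: query the fd mode, derive the maximum key
 * length from it, set up the stage-1 tuple and meta data key layout, query
 * the rule/counter allocation and push the stage-1 key config to hardware.
 */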
4362 static int hclge_init_fd_config(struct hclge_dev *hdev)
4363 {
4364 #define LOW_2_WORDS             0x03
4365         struct hclge_fd_key_cfg *key_cfg;
4366         int ret;
4367
4368         if (!hnae3_dev_fd_supported(hdev))
4369                 return 0;
4370
4371         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4372         if (ret)
4373                 return ret;
4374
4375         switch (hdev->fd_cfg.fd_mode) {
4376         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4377                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4378                 break;
4379         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4380                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4381                 break;
4382         default:
4383                 dev_err(&hdev->pdev->dev,
4384                         "Unsupported flow director mode %d\n",
4385                         hdev->fd_cfg.fd_mode);
4386                 return -EOPNOTSUPP;
4387         }
4388
4389         hdev->fd_cfg.proto_support =
4390                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4391                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4392         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4393         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4394         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4395         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4396         key_cfg->outer_sipv6_word_en = 0;
4397         key_cfg->outer_dipv6_word_en = 0;
4398
4399         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4400                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4401                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4402                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4403
4404         /* If the max 400-bit key is used, we can also support ether type tuples */
4405         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4406                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4407                 key_cfg->tuple_active |=
4408                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4409         }
4410
4411         /* roce_type is used to filter RoCE frames,
4412          * dst_vport is used to specify the rule
4413          */
4414         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4415
4416         ret = hclge_get_fd_allocation(hdev,
4417                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4418                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4419                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4420                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4421         if (ret)
4422                 return ret;
4423
4424         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4425 }
4426
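/* Write or invalidate one flow director TCAM entry. The operation spans
 * three chained command descriptors; the key bytes are split across the
 * tcam_data fields of the three requests. @sel_x selects whether the x or
 * the y half of the ternary key is programmed at @loc.
 */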
4427 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4428                                 int loc, u8 *key, bool is_add)
4429 {
4430         struct hclge_fd_tcam_config_1_cmd *req1;
4431         struct hclge_fd_tcam_config_2_cmd *req2;
4432         struct hclge_fd_tcam_config_3_cmd *req3;
4433         struct hclge_desc desc[3];
4434         int ret;
4435
4436         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4437         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4438         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4439         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4440         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4441
4442         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4443         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4444         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4445
4446         req1->stage = stage;
4447         req1->xy_sel = sel_x ? 1 : 0;
4448         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4449         req1->index = cpu_to_le32(loc);
4450         req1->entry_vld = sel_x ? is_add : 0;
4451
4452         if (key) {
4453                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4454                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4455                        sizeof(req2->tcam_data));
4456                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4457                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4458         }
4459
4460         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4461         if (ret)
4462                 dev_err(&hdev->pdev->dev,
4463                         "config tcam key fail, ret=%d\n",
4464                         ret);
4465
4466         return ret;
4467 }
4468
4469 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4470                               struct hclge_fd_ad_data *action)
4471 {
4472         struct hclge_fd_ad_config_cmd *req;
4473         struct hclge_desc desc;
4474         u64 ad_data = 0;
4475         int ret;
4476
4477         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4478
4479         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4480         req->index = cpu_to_le32(loc);
4481         req->stage = stage;
4482
4483         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4484                       action->write_rule_id_to_bd);
4485         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4486                         action->rule_id);
4487         ad_data <<= 32;
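        /* the rule id fields set above now occupy the upper 32 bits of
         * ad_data; the forwarding action fields below fill the lower 32 bits
         */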
4488         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4489         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4490                       action->forward_to_direct_queue);
4491         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4492                         action->queue_id);
4493         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4494         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4495                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4496         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4497         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4498                         action->counter_id);
4499
4500         req->ad_data = cpu_to_le64(ad_data);
4501         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4502         if (ret)
4503                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4504
4505         return ret;
4506 }
4507
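/* Convert one tuple of @rule into its ternary TCAM representation. The
 * calc_x()/calc_y() helpers (defined earlier in this file) derive the two
 * halves of the x/y TCAM encoding from a tuple value and its mask, so that
 * masked-out bits effectively become don't-care bits. Returns false for an
 * inactive or unhandled tuple bit, true otherwise (including when the tuple
 * is marked unused).
 */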
4508 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4509                                    struct hclge_fd_rule *rule)
4510 {
4511         u16 tmp_x_s, tmp_y_s;
4512         u32 tmp_x_l, tmp_y_l;
4513         int i;
4514
4515         if (rule->unused_tuple & tuple_bit)
4516                 return true;
4517
4518         switch (tuple_bit) {
4519         case 0:
4520                 return false;
4521         case BIT(INNER_DST_MAC):
4522                 for (i = 0; i < 6; i++) {
4523                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4524                                rule->tuples_mask.dst_mac[i]);
4525                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4526                                rule->tuples_mask.dst_mac[i]);
4527                 }
4528
4529                 return true;
4530         case BIT(INNER_SRC_MAC):
4531                 for (i = 0; i < 6; i++) {
4532                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4533                                rule->tuples_mask.src_mac[i]);
4534                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4535                                rule->tuples_mask.src_mac[i]);
4536                 }
4537
4538                 return true;
4539         case BIT(INNER_VLAN_TAG_FST):
4540                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4541                        rule->tuples_mask.vlan_tag1);
4542                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4543                        rule->tuples_mask.vlan_tag1);
4544                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4545                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4546
4547                 return true;
4548         case BIT(INNER_ETH_TYPE):
4549                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4550                        rule->tuples_mask.ether_proto);
4551                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4552                        rule->tuples_mask.ether_proto);
4553                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4554                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4555
4556                 return true;
4557         case BIT(INNER_IP_TOS):
4558                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4559                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4560
4561                 return true;
4562         case BIT(INNER_IP_PROTO):
4563                 calc_x(*key_x, rule->tuples.ip_proto,
4564                        rule->tuples_mask.ip_proto);
4565                 calc_y(*key_y, rule->tuples.ip_proto,
4566                        rule->tuples_mask.ip_proto);
4567
4568                 return true;
4569         case BIT(INNER_SRC_IP):
4570                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4571                        rule->tuples_mask.src_ip[3]);
4572                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4573                        rule->tuples_mask.src_ip[3]);
4574                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4575                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4576
4577                 return true;
4578         case BIT(INNER_DST_IP):
4579                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4580                        rule->tuples_mask.dst_ip[3]);
4581                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4582                        rule->tuples_mask.dst_ip[3]);
4583                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4584                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4585
4586                 return true;
4587         case BIT(INNER_SRC_PORT):
4588                 calc_x(tmp_x_s, rule->tuples.src_port,
4589                        rule->tuples_mask.src_port);
4590                 calc_y(tmp_y_s, rule->tuples.src_port,
4591                        rule->tuples_mask.src_port);
4592                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4593                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4594
4595                 return true;
4596         case BIT(INNER_DST_PORT):
4597                 calc_x(tmp_x_s, rule->tuples.dst_port,
4598                        rule->tuples_mask.dst_port);
4599                 calc_y(tmp_y_s, rule->tuples.dst_port,
4600                        rule->tuples_mask.dst_port);
4601                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4602                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4603
4604                 return true;
4605         default:
4606                 return false;
4607         }
4608 }
4609
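/* Build the meta data "destination port" field: for a host port it encodes
 * the pf_id/vf_id plus the port type bit, for a network port it encodes the
 * physical network port id.
 */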
4610 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4611                                  u8 vf_id, u8 network_port_id)
4612 {
4613         u32 port_number = 0;
4614
4615         if (port_type == HOST_PORT) {
4616                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4617                                 pf_id);
4618                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4619                                 vf_id);
4620                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4621         } else {
4622                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4623                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4624                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4625         }
4626
4627         return port_number;
4628 }
4629
4630 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4631                                        __le32 *key_x, __le32 *key_y,
4632                                        struct hclge_fd_rule *rule)
4633 {
4634         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4635         u8 cur_pos = 0, tuple_size, shift_bits;
4636         int i;
4637
4638         for (i = 0; i < MAX_META_DATA; i++) {
4639                 tuple_size = meta_data_key_info[i].key_length;
4640                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4641
4642                 switch (tuple_bit) {
4643                 case BIT(ROCE_TYPE):
4644                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4645                         cur_pos += tuple_size;
4646                         break;
4647                 case BIT(DST_VPORT):
4648                         port_number = hclge_get_port_number(HOST_PORT, 0,
4649                                                             rule->vf_id, 0);
4650                         hnae3_set_field(meta_data,
4651                                         GENMASK(cur_pos + tuple_size, cur_pos),
4652                                         cur_pos, port_number);
4653                         cur_pos += tuple_size;
4654                         break;
4655                 default:
4656                         break;
4657                 }
4658         }
4659
4660         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4661         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4662         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4663
4664         *key_x = cpu_to_le32(tmp_x << shift_bits);
4665         *key_y = cpu_to_le32(tmp_y << shift_bits);
4666 }
4667
4668 /* A complete key consists of a meta data key and a tuple key.
4669  * The meta data key is stored in the MSB region, the tuple key is stored
4670  * in the LSB region, and unused bits are filled with 0.
4671  */
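/* In hclge_config_key() below the tuple key is packed from key[0] upwards,
 * while the meta data key is written at byte offset
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 within the key buffer.
 */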
4672 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4673                             struct hclge_fd_rule *rule)
4674 {
4675         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4676         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4677         u8 *cur_key_x, *cur_key_y;
4678         int i, ret, tuple_size;
4679         u8 meta_data_region;
4680
4681         memset(key_x, 0, sizeof(key_x));
4682         memset(key_y, 0, sizeof(key_y));
4683         cur_key_x = key_x;
4684         cur_key_y = key_y;
4685
4686         for (i = 0; i < MAX_TUPLE; i++) {
4687                 bool tuple_valid;
4688                 u32 check_tuple;
4689
4690                 tuple_size = tuple_key_info[i].key_length / 8;
4691                 check_tuple = key_cfg->tuple_active & BIT(i);
4692
4693                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4694                                                      cur_key_y, rule);
4695                 if (tuple_valid) {
4696                         cur_key_x += tuple_size;
4697                         cur_key_y += tuple_size;
4698                 }
4699         }
4700
4701         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4702                         MAX_META_DATA_LENGTH / 8;
4703
4704         hclge_fd_convert_meta_data(key_cfg,
4705                                    (__le32 *)(key_x + meta_data_region),
4706                                    (__le32 *)(key_y + meta_data_region),
4707                                    rule);
4708
4709         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4710                                    true);
4711         if (ret) {
4712                 dev_err(&hdev->pdev->dev,
4713                         "fd key_y config fail, loc=%d, ret=%d\n",
4714                         rule->location, ret);
4715                 return ret;
4716         }
4717
4718         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4719                                    true);
4720         if (ret)
4721                 dev_err(&hdev->pdev->dev,
4722                         "fd key_x config fail, loc=%d, ret=%d\n",
4723                         rule->location, ret);
4724         return ret;
4725 }
4726
4727 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4728                                struct hclge_fd_rule *rule)
4729 {
4730         struct hclge_fd_ad_data ad_data;
4731
4732         ad_data.ad_id = rule->location;
4733
4734         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4735                 ad_data.drop_packet = true;
4736                 ad_data.forward_to_direct_queue = false;
4737                 ad_data.queue_id = 0;
4738         } else {
4739                 ad_data.drop_packet = false;
4740                 ad_data.forward_to_direct_queue = true;
4741                 ad_data.queue_id = rule->queue_id;
4742         }
4743
4744         ad_data.use_counter = false;
4745         ad_data.counter_id = 0;
4746
4747         ad_data.use_next_stage = false;
4748         ad_data.next_input_key = 0;
4749
4750         ad_data.write_rule_id_to_bd = true;
4751         ad_data.rule_id = rule->location;
4752
4753         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4754 }
4755
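/* Validate an ethtool flow spec against the tuples the hardware supports
 * and record the tuples that carry no match data in @unused.
 */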
4756 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4757                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4758 {
4759         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4760         struct ethtool_usrip4_spec *usr_ip4_spec;
4761         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4762         struct ethtool_usrip6_spec *usr_ip6_spec;
4763         struct ethhdr *ether_spec;
4764
4765         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4766                 return -EINVAL;
4767
4768         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4769                 return -EOPNOTSUPP;
4770
4771         if ((fs->flow_type & FLOW_EXT) &&
4772             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4773                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4774                 return -EOPNOTSUPP;
4775         }
4776
4777         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4778         case SCTP_V4_FLOW:
4779         case TCP_V4_FLOW:
4780         case UDP_V4_FLOW:
4781                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4782                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4783
4784                 if (!tcp_ip4_spec->ip4src)
4785                         *unused |= BIT(INNER_SRC_IP);
4786
4787                 if (!tcp_ip4_spec->ip4dst)
4788                         *unused |= BIT(INNER_DST_IP);
4789
4790                 if (!tcp_ip4_spec->psrc)
4791                         *unused |= BIT(INNER_SRC_PORT);
4792
4793                 if (!tcp_ip4_spec->pdst)
4794                         *unused |= BIT(INNER_DST_PORT);
4795
4796                 if (!tcp_ip4_spec->tos)
4797                         *unused |= BIT(INNER_IP_TOS);
4798
4799                 break;
4800         case IP_USER_FLOW:
4801                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4802                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4803                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4804
4805                 if (!usr_ip4_spec->ip4src)
4806                         *unused |= BIT(INNER_SRC_IP);
4807
4808                 if (!usr_ip4_spec->ip4dst)
4809                         *unused |= BIT(INNER_DST_IP);
4810
4811                 if (!usr_ip4_spec->tos)
4812                         *unused |= BIT(INNER_IP_TOS);
4813
4814                 if (!usr_ip4_spec->proto)
4815                         *unused |= BIT(INNER_IP_PROTO);
4816
4817                 if (usr_ip4_spec->l4_4_bytes)
4818                         return -EOPNOTSUPP;
4819
4820                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4821                         return -EOPNOTSUPP;
4822
4823                 break;
4824         case SCTP_V6_FLOW:
4825         case TCP_V6_FLOW:
4826         case UDP_V6_FLOW:
4827                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4828                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4829                         BIT(INNER_IP_TOS);
4830
4831                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4832                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4833                         *unused |= BIT(INNER_SRC_IP);
4834
4835                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4836                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4837                         *unused |= BIT(INNER_DST_IP);
4838
4839                 if (!tcp_ip6_spec->psrc)
4840                         *unused |= BIT(INNER_SRC_PORT);
4841
4842                 if (!tcp_ip6_spec->pdst)
4843                         *unused |= BIT(INNER_DST_PORT);
4844
4845                 if (tcp_ip6_spec->tclass)
4846                         return -EOPNOTSUPP;
4847
4848                 break;
4849         case IPV6_USER_FLOW:
4850                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4851                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4852                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4853                         BIT(INNER_DST_PORT);
4854
4855                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4856                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4857                         *unused |= BIT(INNER_SRC_IP);
4858
4859                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4860                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4861                         *unused |= BIT(INNER_DST_IP);
4862
4863                 if (!usr_ip6_spec->l4_proto)
4864                         *unused |= BIT(INNER_IP_PROTO);
4865
4866                 if (usr_ip6_spec->tclass)
4867                         return -EOPNOTSUPP;
4868
4869                 if (usr_ip6_spec->l4_4_bytes)
4870                         return -EOPNOTSUPP;
4871
4872                 break;
4873         case ETHER_FLOW:
4874                 ether_spec = &fs->h_u.ether_spec;
4875                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4876                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4877                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4878
4879                 if (is_zero_ether_addr(ether_spec->h_source))
4880                         *unused |= BIT(INNER_SRC_MAC);
4881
4882                 if (is_zero_ether_addr(ether_spec->h_dest))
4883                         *unused |= BIT(INNER_DST_MAC);
4884
4885                 if (!ether_spec->h_proto)
4886                         *unused |= BIT(INNER_ETH_TYPE);
4887
4888                 break;
4889         default:
4890                 return -EOPNOTSUPP;
4891         }
4892
4893         if ((fs->flow_type & FLOW_EXT)) {
4894                 if (fs->h_ext.vlan_etype)
4895                         return -EOPNOTSUPP;
4896                 if (!fs->h_ext.vlan_tci)
4897                         *unused |= BIT(INNER_VLAN_TAG_FST);
4898
4899                 if (fs->m_ext.vlan_tci) {
4900                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4901                                 return -EINVAL;
4902                 }
4903         } else {
4904                 *unused |= BIT(INNER_VLAN_TAG_FST);
4905         }
4906
4907         if (fs->flow_type & FLOW_MAC_EXT) {
4908                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4909                         return -EOPNOTSUPP;
4910
4911                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4912                         *unused |= BIT(INNER_DST_MAC);
4913                 else
4914                         *unused &= ~(BIT(INNER_DST_MAC));
4915         }
4916
4917         return 0;
4918 }
4919
4920 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4921 {
4922         struct hclge_fd_rule *rule = NULL;
4923         struct hlist_node *node2;
4924
4925         spin_lock_bh(&hdev->fd_rule_lock);
4926         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4927                 if (rule->location >= location)
4928                         break;
4929         }
4930
4931         spin_unlock_bh(&hdev->fd_rule_lock);
4932
4933         return rule && rule->location == location;
4934 }
4935
4936 /* the caller must hold hdev->fd_rule_lock */
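/* The rule list at hdev->fd_rule_list is kept sorted by rule location; an
 * existing rule at @location is removed first, and (when @is_add) the new
 * rule is linked in behind the last rule with a smaller location.
 */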
4937 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4938                                      struct hclge_fd_rule *new_rule,
4939                                      u16 location,
4940                                      bool is_add)
4941 {
4942         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4943         struct hlist_node *node2;
4944
4945         if (is_add && !new_rule)
4946                 return -EINVAL;
4947
4948         hlist_for_each_entry_safe(rule, node2,
4949                                   &hdev->fd_rule_list, rule_node) {
4950                 if (rule->location >= location)
4951                         break;
4952                 parent = rule;
4953         }
4954
4955         if (rule && rule->location == location) {
4956                 hlist_del(&rule->rule_node);
4957                 kfree(rule);
4958                 hdev->hclge_fd_rule_num--;
4959
4960                 if (!is_add) {
4961                         if (!hdev->hclge_fd_rule_num)
4962                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4963                         clear_bit(location, hdev->fd_bmap);
4964
4965                         return 0;
4966                 }
4967         } else if (!is_add) {
4968                 dev_err(&hdev->pdev->dev,
4969                         "delete fail, rule %d does not exist\n",
4970                         location);
4971                 return -EINVAL;
4972         }
4973
4974         INIT_HLIST_NODE(&new_rule->rule_node);
4975
4976         if (parent)
4977                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4978         else
4979                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4980
4981         set_bit(location, hdev->fd_bmap);
4982         hdev->hclge_fd_rule_num++;
4983         hdev->fd_active_type = new_rule->rule_type;
4984
4985         return 0;
4986 }
4987
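/* Translate the ethtool flow spec into the driver's tuple/mask
 * representation, converting the big-endian ethtool fields to CPU order.
 */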
4988 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4989                               struct ethtool_rx_flow_spec *fs,
4990                               struct hclge_fd_rule *rule)
4991 {
4992         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4993
4994         switch (flow_type) {
4995         case SCTP_V4_FLOW:
4996         case TCP_V4_FLOW:
4997         case UDP_V4_FLOW:
4998                 rule->tuples.src_ip[3] =
4999                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5000                 rule->tuples_mask.src_ip[3] =
5001                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5002
5003                 rule->tuples.dst_ip[3] =
5004                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5005                 rule->tuples_mask.dst_ip[3] =
5006                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5007
5008                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5009                 rule->tuples_mask.src_port =
5010                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5011
5012                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5013                 rule->tuples_mask.dst_port =
5014                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5015
5016                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5017                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5018
5019                 rule->tuples.ether_proto = ETH_P_IP;
5020                 rule->tuples_mask.ether_proto = 0xFFFF;
5021
5022                 break;
5023         case IP_USER_FLOW:
5024                 rule->tuples.src_ip[3] =
5025                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5026                 rule->tuples_mask.src_ip[3] =
5027                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5028
5029                 rule->tuples.dst_ip[3] =
5030                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5031                 rule->tuples_mask.dst_ip[3] =
5032                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5033
5034                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5035                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5036
5037                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5038                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5039
5040                 rule->tuples.ether_proto = ETH_P_IP;
5041                 rule->tuples_mask.ether_proto = 0xFFFF;
5042
5043                 break;
5044         case SCTP_V6_FLOW:
5045         case TCP_V6_FLOW:
5046         case UDP_V6_FLOW:
5047                 be32_to_cpu_array(rule->tuples.src_ip,
5048                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5049                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5050                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5051
5052                 be32_to_cpu_array(rule->tuples.dst_ip,
5053                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5054                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5055                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5056
5057                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5058                 rule->tuples_mask.src_port =
5059                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5060
5061                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5062                 rule->tuples_mask.dst_port =
5063                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5064
5065                 rule->tuples.ether_proto = ETH_P_IPV6;
5066                 rule->tuples_mask.ether_proto = 0xFFFF;
5067
5068                 break;
5069         case IPV6_USER_FLOW:
5070                 be32_to_cpu_array(rule->tuples.src_ip,
5071                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5072                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5073                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5074
5075                 be32_to_cpu_array(rule->tuples.dst_ip,
5076                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5077                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5078                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5079
5080                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5081                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5082
5083                 rule->tuples.ether_proto = ETH_P_IPV6;
5084                 rule->tuples_mask.ether_proto = 0xFFFF;
5085
5086                 break;
5087         case ETHER_FLOW:
5088                 ether_addr_copy(rule->tuples.src_mac,
5089                                 fs->h_u.ether_spec.h_source);
5090                 ether_addr_copy(rule->tuples_mask.src_mac,
5091                                 fs->m_u.ether_spec.h_source);
5092
5093                 ether_addr_copy(rule->tuples.dst_mac,
5094                                 fs->h_u.ether_spec.h_dest);
5095                 ether_addr_copy(rule->tuples_mask.dst_mac,
5096                                 fs->m_u.ether_spec.h_dest);
5097
5098                 rule->tuples.ether_proto =
5099                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5100                 rule->tuples_mask.ether_proto =
5101                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5102
5103                 break;
5104         default:
5105                 return -EOPNOTSUPP;
5106         }
5107
5108         switch (flow_type) {
5109         case SCTP_V4_FLOW:
5110         case SCTP_V6_FLOW:
5111                 rule->tuples.ip_proto = IPPROTO_SCTP;
5112                 rule->tuples_mask.ip_proto = 0xFF;
5113                 break;
5114         case TCP_V4_FLOW:
5115         case TCP_V6_FLOW:
5116                 rule->tuples.ip_proto = IPPROTO_TCP;
5117                 rule->tuples_mask.ip_proto = 0xFF;
5118                 break;
5119         case UDP_V4_FLOW:
5120         case UDP_V6_FLOW:
5121                 rule->tuples.ip_proto = IPPROTO_UDP;
5122                 rule->tuples_mask.ip_proto = 0xFF;
5123                 break;
5124         default:
5125                 break;
5126         }
5127
5128         if ((fs->flow_type & FLOW_EXT)) {
5129                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5130                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5131         }
5132
5133         if (fs->flow_type & FLOW_MAC_EXT) {
5134                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5135                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5136         }
5137
5138         return 0;
5139 }
5140
5141 /* the caller must hold hdev->fd_rule_lock */
5142 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5143                                 struct hclge_fd_rule *rule)
5144 {
5145         int ret;
5146
5147         if (!rule) {
5148                 dev_err(&hdev->pdev->dev,
5149                         "The flow director rule is NULL\n");
5150                 return -EINVAL;
5151         }
5152
5153         /* it never fails here, so there is no need to check the return value */
5154         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5155
5156         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5157         if (ret)
5158                 goto clear_rule;
5159
5160         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5161         if (ret)
5162                 goto clear_rule;
5163
5164         return 0;
5165
5166 clear_rule:
5167         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5168         return ret;
5169 }
5170
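/* Entry point for adding a flow director rule from the ethtool set_rxnfc
 * path: validate the spec, resolve the destination vport and queue from
 * ring_cookie, build an hclge_fd_rule, clear any aRFS rules to avoid
 * conflicts and program the new rule under fd_rule_lock.
 */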
5171 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5172                               struct ethtool_rxnfc *cmd)
5173 {
5174         struct hclge_vport *vport = hclge_get_vport(handle);
5175         struct hclge_dev *hdev = vport->back;
5176         u16 dst_vport_id = 0, q_index = 0;
5177         struct ethtool_rx_flow_spec *fs;
5178         struct hclge_fd_rule *rule;
5179         u32 unused = 0;
5180         u8 action;
5181         int ret;
5182
5183         if (!hnae3_dev_fd_supported(hdev))
5184                 return -EOPNOTSUPP;
5185
5186         if (!hdev->fd_en) {
5187                 dev_warn(&hdev->pdev->dev,
5188                          "Please enable flow director first\n");
5189                 return -EOPNOTSUPP;
5190         }
5191
5192         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5193
5194         ret = hclge_fd_check_spec(hdev, fs, &unused);
5195         if (ret) {
5196                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5197                 return ret;
5198         }
5199
5200         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5201                 action = HCLGE_FD_ACTION_DROP_PACKET;
5202         } else {
5203                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5204                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5205                 u16 tqps;
5206
5207                 if (vf > hdev->num_req_vfs) {
5208                         dev_err(&hdev->pdev->dev,
5209                                 "Error: vf id (%d) > max vf num (%d)\n",
5210                                 vf, hdev->num_req_vfs);
5211                         return -EINVAL;
5212                 }
5213
5214                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5215                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5216
5217                 if (ring >= tqps) {
5218                         dev_err(&hdev->pdev->dev,
5219                                 "Error: queue id (%d) > max tqp num (%d)\n",
5220                                 ring, tqps - 1);
5221                         return -EINVAL;
5222                 }
5223
5224                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5225                 q_index = ring;
5226         }
5227
5228         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5229         if (!rule)
5230                 return -ENOMEM;
5231
5232         ret = hclge_fd_get_tuple(hdev, fs, rule);
5233         if (ret) {
5234                 kfree(rule);
5235                 return ret;
5236         }
5237
5238         rule->flow_type = fs->flow_type;
5239
5240         rule->location = fs->location;
5241         rule->unused_tuple = unused;
5242         rule->vf_id = dst_vport_id;
5243         rule->queue_id = q_index;
5244         rule->action = action;
5245         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5246
5247         /* To avoid rule conflicts, we need to clear all aRFS rules when
5248          * the user configures a rule via ethtool.
5249          */
5250         hclge_clear_arfs_rules(handle);
5251
5252         spin_lock_bh(&hdev->fd_rule_lock);
5253         ret = hclge_fd_config_rule(hdev, rule);
5254
5255         spin_unlock_bh(&hdev->fd_rule_lock);
5256
5257         return ret;
5258 }
5259
5260 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5261                               struct ethtool_rxnfc *cmd)
5262 {
5263         struct hclge_vport *vport = hclge_get_vport(handle);
5264         struct hclge_dev *hdev = vport->back;
5265         struct ethtool_rx_flow_spec *fs;
5266         int ret;
5267
5268         if (!hnae3_dev_fd_supported(hdev))
5269                 return -EOPNOTSUPP;
5270
5271         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5272
5273         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5274                 return -EINVAL;
5275
5276         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5277                 dev_err(&hdev->pdev->dev,
5278                         "Delete fail, rule %d does not exist\n",
5279                         fs->location);
5280                 return -ENOENT;
5281         }
5282
5283         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5284                                    fs->location, NULL, false);
5285         if (ret)
5286                 return ret;
5287
5288         spin_lock_bh(&hdev->fd_rule_lock);
5289         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5290
5291         spin_unlock_bh(&hdev->fd_rule_lock);
5292
5293         return ret;
5294 }
5295
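/* Invalidate every TCAM entry tracked in fd_bmap; when @clear_list is true
 * also free the software rule list and reset the rule bookkeeping.
 */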
5296 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5297                                      bool clear_list)
5298 {
5299         struct hclge_vport *vport = hclge_get_vport(handle);
5300         struct hclge_dev *hdev = vport->back;
5301         struct hclge_fd_rule *rule;
5302         struct hlist_node *node;
5303         u16 location;
5304
5305         if (!hnae3_dev_fd_supported(hdev))
5306                 return;
5307
5308         spin_lock_bh(&hdev->fd_rule_lock);
5309         for_each_set_bit(location, hdev->fd_bmap,
5310                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5311                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5312                                      NULL, false);
5313
5314         if (clear_list) {
5315                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5316                                           rule_node) {
5317                         hlist_del(&rule->rule_node);
5318                         kfree(rule);
5319                 }
5320                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5321                 hdev->hclge_fd_rule_num = 0;
5322                 bitmap_zero(hdev->fd_bmap,
5323                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5324         }
5325
5326         spin_unlock_bh(&hdev->fd_rule_lock);
5327 }
5328
5329 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5330 {
5331         struct hclge_vport *vport = hclge_get_vport(handle);
5332         struct hclge_dev *hdev = vport->back;
5333         struct hclge_fd_rule *rule;
5334         struct hlist_node *node;
5335         int ret;
5336
5337         /* Return 0 here, because the reset error handling will check this
5338          * return value. If an error is returned here, the reset process
5339          * will fail.
5340          */
5341         if (!hnae3_dev_fd_supported(hdev))
5342                 return 0;
5343
5344         /* if fd is disabled, the rules should not be restored during reset */
5345         if (!hdev->fd_en)
5346                 return 0;
5347
5348         spin_lock_bh(&hdev->fd_rule_lock);
5349         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5350                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5351                 if (!ret)
5352                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5353
5354                 if (ret) {
5355                         dev_warn(&hdev->pdev->dev,
5356                                  "Restore rule %d failed, remove it\n",
5357                                  rule->location);
5358                         clear_bit(rule->location, hdev->fd_bmap);
5359                         hlist_del(&rule->rule_node);
5360                         kfree(rule);
5361                         hdev->hclge_fd_rule_num--;
5362                 }
5363         }
5364
5365         if (hdev->hclge_fd_rule_num)
5366                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5367
5368         spin_unlock_bh(&hdev->fd_rule_lock);
5369
5370         return 0;
5371 }
5372
5373 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5374                                  struct ethtool_rxnfc *cmd)
5375 {
5376         struct hclge_vport *vport = hclge_get_vport(handle);
5377         struct hclge_dev *hdev = vport->back;
5378
5379         if (!hnae3_dev_fd_supported(hdev))
5380                 return -EOPNOTSUPP;
5381
5382         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5383         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5384
5385         return 0;
5386 }
5387
5388 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5389                                   struct ethtool_rxnfc *cmd)
5390 {
5391         struct hclge_vport *vport = hclge_get_vport(handle);
5392         struct hclge_fd_rule *rule = NULL;
5393         struct hclge_dev *hdev = vport->back;
5394         struct ethtool_rx_flow_spec *fs;
5395         struct hlist_node *node2;
5396
5397         if (!hnae3_dev_fd_supported(hdev))
5398                 return -EOPNOTSUPP;
5399
5400         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5401
5402         spin_lock_bh(&hdev->fd_rule_lock);
5403
5404         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5405                 if (rule->location >= fs->location)
5406                         break;
5407         }
5408
5409         if (!rule || fs->location != rule->location) {
5410                 spin_unlock_bh(&hdev->fd_rule_lock);
5411
5412                 return -ENOENT;
5413         }
5414
5415         fs->flow_type = rule->flow_type;
5416         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5417         case SCTP_V4_FLOW:
5418         case TCP_V4_FLOW:
5419         case UDP_V4_FLOW:
5420                 fs->h_u.tcp_ip4_spec.ip4src =
5421                                 cpu_to_be32(rule->tuples.src_ip[3]);
5422                 fs->m_u.tcp_ip4_spec.ip4src =
5423                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5425
5426                 fs->h_u.tcp_ip4_spec.ip4dst =
5427                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5428                 fs->m_u.tcp_ip4_spec.ip4dst =
5429                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5431
5432                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5433                 fs->m_u.tcp_ip4_spec.psrc =
5434                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5435                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5436
5437                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5438                 fs->m_u.tcp_ip4_spec.pdst =
5439                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5440                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5441
5442                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5443                 fs->m_u.tcp_ip4_spec.tos =
5444                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5445                                 0 : rule->tuples_mask.ip_tos;
5446
5447                 break;
5448         case IP_USER_FLOW:
5449                 fs->h_u.usr_ip4_spec.ip4src =
5450                                 cpu_to_be32(rule->tuples.src_ip[3]);
5451                 fs->m_u.usr_ip4_spec.ip4src =
5452                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5453                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5454
5455                 fs->h_u.usr_ip4_spec.ip4dst =
5456                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5457                 fs->m_u.usr_ip4_spec.ip4dst =
5458                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5459                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5460
5461                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5462                 fs->m_u.usr_ip4_spec.tos =
5463                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5464                                 0 : rule->tuples_mask.ip_tos;
5465
5466                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5467                 fs->m_u.usr_ip4_spec.proto =
5468                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5469                                 0 : rule->tuples_mask.ip_proto;
5470
5471                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5472
5473                 break;
5474         case SCTP_V6_FLOW:
5475         case TCP_V6_FLOW:
5476         case UDP_V6_FLOW:
5477                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5478                                   rule->tuples.src_ip, 4);
5479                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5480                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5481                 else
5482                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5483                                           rule->tuples_mask.src_ip, 4);
5484
5485                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5486                                   rule->tuples.dst_ip, 4);
5487                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5488                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5489                 else
5490                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5491                                           rule->tuples_mask.dst_ip, 4);
5492
5493                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5494                 fs->m_u.tcp_ip6_spec.psrc =
5495                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5496                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5497
5498                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5499                 fs->m_u.tcp_ip6_spec.pdst =
5500                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5501                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5502
5503                 break;
5504         case IPV6_USER_FLOW:
5505                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5506                                   rule->tuples.src_ip, 4);
5507                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5508                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5509                 else
5510                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5511                                           rule->tuples_mask.src_ip, 4);
5512
5513                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5514                                   rule->tuples.dst_ip, 4);
5515                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5516                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5517                 else
5518                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5519                                           rule->tuples_mask.dst_ip, 4);
5520
5521                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5522                 fs->m_u.usr_ip6_spec.l4_proto =
5523                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5524                                 0 : rule->tuples_mask.ip_proto;
5525
5526                 break;
5527         case ETHER_FLOW:
5528                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5529                                 rule->tuples.src_mac);
5530                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5531                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5532                 else
5533                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5534                                         rule->tuples_mask.src_mac);
5535
5536                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5537                                 rule->tuples.dst_mac);
5538                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5539                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5540                 else
5541                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5542                                         rule->tuples_mask.dst_mac);
5543
5544                 fs->h_u.ether_spec.h_proto =
5545                                 cpu_to_be16(rule->tuples.ether_proto);
5546                 fs->m_u.ether_spec.h_proto =
5547                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5548                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5549
5550                 break;
5551         default:
5552                 spin_unlock_bh(&hdev->fd_rule_lock);
5553                 return -EOPNOTSUPP;
5554         }
5555
5556         if (fs->flow_type & FLOW_EXT) {
5557                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5558                 fs->m_ext.vlan_tci =
5559                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5560                                 cpu_to_be16(VLAN_VID_MASK) :
5561                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5562         }
5563
5564         if (fs->flow_type & FLOW_MAC_EXT) {
5565                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5566                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5567                         eth_zero_addr(fs->m_ext.h_dest);
5568                 else
5569                         ether_addr_copy(fs->m_ext.h_dest,
5570                                         rule->tuples_mask.dst_mac);
5571         }
5572
5573         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5574                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5575         } else {
5576                 u64 vf_id;
5577
5578                 fs->ring_cookie = rule->queue_id;
5579                 vf_id = rule->vf_id;
5580                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5581                 fs->ring_cookie |= vf_id;
5582         }
5583
5584         spin_unlock_bh(&hdev->fd_rule_lock);
5585
5586         return 0;
5587 }
5588
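/* hclge_get_all_rules - report the location of every configured flow
 * director rule so that ethtool can query them individually; returns
 * -EMSGSIZE if rule_locs[] has fewer slots than there are rules.
 */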
5589 static int hclge_get_all_rules(struct hnae3_handle *handle,
5590                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5591 {
5592         struct hclge_vport *vport = hclge_get_vport(handle);
5593         struct hclge_dev *hdev = vport->back;
5594         struct hclge_fd_rule *rule;
5595         struct hlist_node *node2;
5596         int cnt = 0;
5597
5598         if (!hnae3_dev_fd_supported(hdev))
5599                 return -EOPNOTSUPP;
5600
5601         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5602
5603         spin_lock_bh(&hdev->fd_rule_lock);
5604         hlist_for_each_entry_safe(rule, node2,
5605                                   &hdev->fd_rule_list, rule_node) {
5606                 if (cnt == cmd->rule_cnt) {
5607                         spin_unlock_bh(&hdev->fd_rule_lock);
5608                         return -EMSGSIZE;
5609                 }
5610
5611                 rule_locs[cnt] = rule->location;
5612                 cnt++;
5613         }
5614
5615         spin_unlock_bh(&hdev->fd_rule_lock);
5616
5617         cmd->rule_cnt = cnt;
5618
5619         return 0;
5620 }
5621
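/* Convert the tuples dissected from a received flow (struct flow_keys) into
 * the driver's hclge_fd_rule_tuples layout, in CPU byte order. For IPv4 only
 * the last word of the 4-word address arrays is used; for IPv6 all four
 * words are copied.
 */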
5622 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5623                                      struct hclge_fd_rule_tuples *tuples)
5624 {
5625         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5626         tuples->ip_proto = fkeys->basic.ip_proto;
5627         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5628
5629         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5630                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5631                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5632         } else {
5633                 memcpy(tuples->src_ip,
5634                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5635                        sizeof(tuples->src_ip));
5636                 memcpy(tuples->dst_ip,
5637                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5638                        sizeof(tuples->dst_ip));
5639         }
5640 }
5641
5642 /* traverse all rules, check whether an existing rule has the same tuples */
5643 static struct hclge_fd_rule *
5644 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5645                           const struct hclge_fd_rule_tuples *tuples)
5646 {
5647         struct hclge_fd_rule *rule = NULL;
5648         struct hlist_node *node;
5649
5650         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5651                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5652                         return rule;
5653         }
5654
5655         return NULL;
5656 }
5657
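/* Build an aRFS rule from the dissected tuples: every tuple field is fully
 * masked, the fields aRFS does not match on (MAC addresses, VLAN tag, IP TOS
 * and source port) are marked unused, and the flow type is derived from the
 * ethernet and L4 protocols.
 */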
5658 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5659                                      struct hclge_fd_rule *rule)
5660 {
5661         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5662                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5663                              BIT(INNER_SRC_PORT);
5664         rule->action = 0;
5665         rule->vf_id = 0;
5666         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5667         if (tuples->ether_proto == ETH_P_IP) {
5668                 if (tuples->ip_proto == IPPROTO_TCP)
5669                         rule->flow_type = TCP_V4_FLOW;
5670                 else
5671                         rule->flow_type = UDP_V4_FLOW;
5672         } else {
5673                 if (tuples->ip_proto == IPPROTO_TCP)
5674                         rule->flow_type = TCP_V6_FLOW;
5675                 else
5676                         rule->flow_type = UDP_V6_FLOW;
5677         }
5678         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5679         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5680 }
5681
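/* aRFS entry point: steer the flow described by fkeys to queue_id. If a rule
 * with the same tuples already exists, only its destination queue is updated;
 * otherwise a new TCAM rule is allocated and written to hardware. The rule
 * location is returned as the filter id. aRFS rules are refused while
 * user-configured (ethtool) rules are active.
 */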
5682 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5683                                       u16 flow_id, struct flow_keys *fkeys)
5684 {
5685 #ifdef CONFIG_RFS_ACCEL
5686         struct hclge_vport *vport = hclge_get_vport(handle);
5687         struct hclge_fd_rule_tuples new_tuples;
5688         struct hclge_dev *hdev = vport->back;
5689         struct hclge_fd_rule *rule;
5690         u16 tmp_queue_id;
5691         u16 bit_id;
5692         int ret;
5693
5694         if (!hnae3_dev_fd_supported(hdev))
5695                 return -EOPNOTSUPP;
5696
5697         memset(&new_tuples, 0, sizeof(new_tuples));
5698         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5699
5700         spin_lock_bh(&hdev->fd_rule_lock);
5701
5702         /* when an fd rule has already been added by the user,
5703          * arfs should not take effect
5704          */
5705         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5706                 spin_unlock_bh(&hdev->fd_rule_lock);
5707
5708                 return -EOPNOTSUPP;
5709         }
5710
5711         /* check whether a flow director filter already exists for this
5712          * flow; if not, create a new filter for it;
5713          * if a filter exists with a different queue id, modify the filter;
5714          * if a filter exists with the same queue id, do nothing
5715          */
5716         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5717         if (!rule) {
5718                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5719                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5720                         spin_unlock_bh(&hdev->fd_rule_lock);
5721
5722                         return -ENOSPC;
5723                 }
5724
5725                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5726                 if (!rule) {
5727                         spin_unlock_bh(&hdev->fd_rule_lock);
5728
5729                         return -ENOMEM;
5730                 }
5731
5732                 set_bit(bit_id, hdev->fd_bmap);
5733                 rule->location = bit_id;
5734                 rule->flow_id = flow_id;
5735                 rule->queue_id = queue_id;
5736                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5737                 ret = hclge_fd_config_rule(hdev, rule);
5738
5739                 spin_unlock_bh(&hdev->fd_rule_lock);
5740
5741                 if (ret)
5742                         return ret;
5743
5744                 return rule->location;
5745         }
5746
5747         spin_unlock_bh(&hdev->fd_rule_lock);
5748
5749         if (rule->queue_id == queue_id)
5750                 return rule->location;
5751
5752         tmp_queue_id = rule->queue_id;
5753         rule->queue_id = queue_id;
5754         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5755         if (ret) {
5756                 rule->queue_id = tmp_queue_id;
5757                 return ret;
5758         }
5759
5760         return rule->location;
5761 #endif
        /* fall back when CONFIG_RFS_ACCEL is not enabled; with it enabled,
         * all paths above have already returned
         */
        return -EOPNOTSUPP;
5762 }
5763
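/* Age out aRFS rules: entries the stack reports as expirable via
 * rps_may_expire_flow() are unlinked from the rule list under fd_rule_lock,
 * then their TCAM entries are cleared and the rules freed outside the lock.
 * Only runs while aRFS rules are the active rule type.
 */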
5764 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5765 {
5766 #ifdef CONFIG_RFS_ACCEL
5767         struct hnae3_handle *handle = &hdev->vport[0].nic;
5768         struct hclge_fd_rule *rule;
5769         struct hlist_node *node;
5770         HLIST_HEAD(del_list);
5771
5772         spin_lock_bh(&hdev->fd_rule_lock);
5773         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5774                 spin_unlock_bh(&hdev->fd_rule_lock);
5775                 return;
5776         }
5777         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5778                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5779                                         rule->flow_id, rule->location)) {
5780                         hlist_del_init(&rule->rule_node);
5781                         hlist_add_head(&rule->rule_node, &del_list);
5782                         hdev->hclge_fd_rule_num--;
5783                         clear_bit(rule->location, hdev->fd_bmap);
5784                 }
5785         }
5786         spin_unlock_bh(&hdev->fd_rule_lock);
5787
5788         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5789                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5790                                      rule->location, NULL, false);
5791                 kfree(rule);
5792         }
5793 #endif
5794 }
5795
5796 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5797 {
5798 #ifdef CONFIG_RFS_ACCEL
5799         struct hclge_vport *vport = hclge_get_vport(handle);
5800         struct hclge_dev *hdev = vport->back;
5801
5802         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5803                 hclge_del_all_fd_entries(handle, true);
5804 #endif
5805 }
5806
5807 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5808 {
5809         struct hclge_vport *vport = hclge_get_vport(handle);
5810         struct hclge_dev *hdev = vport->back;
5811
5812         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5813                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5814 }
5815
5816 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5817 {
5818         struct hclge_vport *vport = hclge_get_vport(handle);
5819         struct hclge_dev *hdev = vport->back;
5820
5821         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5822 }
5823
5824 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5825 {
5826         struct hclge_vport *vport = hclge_get_vport(handle);
5827         struct hclge_dev *hdev = vport->back;
5828
5829         return hdev->rst_stats.hw_reset_done_cnt;
5830 }
5831
5832 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5833 {
5834         struct hclge_vport *vport = hclge_get_vport(handle);
5835         struct hclge_dev *hdev = vport->back;
5836         bool clear;
5837
5838         hdev->fd_en = enable;
5839         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5840         if (!enable)
5841                 hclge_del_all_fd_entries(handle, clear);
5842         else
5843                 hclge_restore_fd_entries(handle);
5844 }
5845
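/* Enable or disable the MAC datapath: TX/RX enable, padding, FCS and
 * truncation bits are switched together through the
 * HCLGE_OPC_CONFIG_MAC_MODE command; the 1588 and loopback bits are always
 * cleared here.
 */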
5846 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5847 {
5848         struct hclge_desc desc;
5849         struct hclge_config_mac_mode_cmd *req =
5850                 (struct hclge_config_mac_mode_cmd *)desc.data;
5851         u32 loop_en = 0;
5852         int ret;
5853
5854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5855         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5856         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5857         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5858         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5868         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5869         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5870
5871         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5872         if (ret)
5873                 dev_err(&hdev->pdev->dev,
5874                         "mac enable fail, ret =%d.\n", ret);
5875 }
5876
5877 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5878 {
5879         struct hclge_config_mac_mode_cmd *req;
5880         struct hclge_desc desc;
5881         u32 loop_en;
5882         int ret;
5883
5884         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5885         /* 1 Read out the MAC mode config first */
5886         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5887         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5888         if (ret) {
5889                 dev_err(&hdev->pdev->dev,
5890                         "mac loopback get fail, ret =%d.\n", ret);
5891                 return ret;
5892         }
5893
5894         /* 2 Then setup the loopback flag */
5895         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5896         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5897         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5898         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5899
5900         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5901
5902         /* 3 Config mac work mode with loopback flag
5903          * and its original configuration parameters
5904          */
5905         hclge_cmd_reuse_desc(&desc, false);
5906         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5907         if (ret)
5908                 dev_err(&hdev->pdev->dev,
5909                         "mac loopback set fail, ret =%d.\n", ret);
5910         return ret;
5911 }
5912
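/* Configure serdes serial/parallel inner loopback: write the loopback
 * request, poll until the firmware reports completion, then reconfigure the
 * MAC and wait for the link to reach the state implied by 'en' (up when
 * enabling, down when disabling).
 */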
5913 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5914                                      enum hnae3_loop loop_mode)
5915 {
5916 #define HCLGE_SERDES_RETRY_MS   10
5917 #define HCLGE_SERDES_RETRY_NUM  100
5918
5919 #define HCLGE_MAC_LINK_STATUS_MS   10
5920 #define HCLGE_MAC_LINK_STATUS_NUM  100
5921 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5922 #define HCLGE_MAC_LINK_STATUS_UP   1
5923
5924         struct hclge_serdes_lb_cmd *req;
5925         struct hclge_desc desc;
5926         int mac_link_ret = 0;
5927         int ret, i = 0;
5928         u8 loop_mode_b;
5929
5930         req = (struct hclge_serdes_lb_cmd *)desc.data;
5931         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5932
5933         switch (loop_mode) {
5934         case HNAE3_LOOP_SERIAL_SERDES:
5935                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5936                 break;
5937         case HNAE3_LOOP_PARALLEL_SERDES:
5938                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5939                 break;
5940         default:
5941                 dev_err(&hdev->pdev->dev,
5942                         "unsupported serdes loopback mode %d\n", loop_mode);
5943                 return -ENOTSUPP;
5944         }
5945
5946         if (en) {
5947                 req->enable = loop_mode_b;
5948                 req->mask = loop_mode_b;
5949                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5950         } else {
5951                 req->mask = loop_mode_b;
5952                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5953         }
5954
5955         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5956         if (ret) {
5957                 dev_err(&hdev->pdev->dev,
5958                         "serdes loopback set fail, ret = %d\n", ret);
5959                 return ret;
5960         }
5961
5962         do {
5963                 msleep(HCLGE_SERDES_RETRY_MS);
5964                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5965                                            true);
5966                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5967                 if (ret) {
5968                         dev_err(&hdev->pdev->dev,
5969                                 "serdes loopback get fail, ret = %d\n", ret);
5970                         return ret;
5971                 }
5972         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5973                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5974
5975         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5976                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5977                 return -EBUSY;
5978         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5979                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5980                 return -EIO;
5981         }
5982
5983         hclge_cfg_mac_mode(hdev, en);
5984
5985         i = 0;
5986         do {
5987                 /* serdes internal loopback, independent of the network cable */
5988                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5989                 ret = hclge_get_mac_link_status(hdev);
5990                 if (ret == mac_link_ret)
5991                         return 0;
5992         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5993
5994         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5995
5996         return -EBUSY;
5997 }
5998
5999 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6000                             int stream_id, bool enable)
6001 {
6002         struct hclge_desc desc;
6003         struct hclge_cfg_com_tqp_queue_cmd *req =
6004                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6005         int ret;
6006
6007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6008         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6009         req->stream_id = cpu_to_le16(stream_id);
6010         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6011
6012         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6013         if (ret)
6014                 dev_err(&hdev->pdev->dev,
6015                         "Tqp enable fail, status =%d.\n", ret);
6016         return ret;
6017 }
6018
6019 static int hclge_set_loopback(struct hnae3_handle *handle,
6020                               enum hnae3_loop loop_mode, bool en)
6021 {
6022         struct hclge_vport *vport = hclge_get_vport(handle);
6023         struct hnae3_knic_private_info *kinfo;
6024         struct hclge_dev *hdev = vport->back;
6025         int i, ret;
6026
6027         switch (loop_mode) {
6028         case HNAE3_LOOP_APP:
6029                 ret = hclge_set_app_loopback(hdev, en);
6030                 break;
6031         case HNAE3_LOOP_SERIAL_SERDES:
6032         case HNAE3_LOOP_PARALLEL_SERDES:
6033                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6034                 break;
6035         default:
6036                 ret = -ENOTSUPP;
6037                 dev_err(&hdev->pdev->dev,
6038                         "loop_mode %d is not supported\n", loop_mode);
6039                 break;
6040         }
6041
6042         if (ret)
6043                 return ret;
6044
6045         kinfo = &vport->nic.kinfo;
6046         for (i = 0; i < kinfo->num_tqps; i++) {
6047                 ret = hclge_tqp_enable(hdev, i, 0, en);
6048                 if (ret)
6049                         return ret;
6050         }
6051
6052         return 0;
6053 }
6054
6055 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6056 {
6057         struct hclge_vport *vport = hclge_get_vport(handle);
6058         struct hnae3_knic_private_info *kinfo;
6059         struct hnae3_queue *queue;
6060         struct hclge_tqp *tqp;
6061         int i;
6062
6063         kinfo = &vport->nic.kinfo;
6064         for (i = 0; i < kinfo->num_tqps; i++) {
6065                 queue = handle->kinfo.tqp[i];
6066                 tqp = container_of(queue, struct hclge_tqp, q);
6067                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6068         }
6069 }
6070
6071 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6072 {
6073         struct hclge_vport *vport = hclge_get_vport(handle);
6074         struct hclge_dev *hdev = vport->back;
6075
6076         if (enable) {
6077                 mod_timer(&hdev->service_timer, jiffies + HZ);
6078         } else {
6079                 del_timer_sync(&hdev->service_timer);
6080                 cancel_work_sync(&hdev->service_task);
6081                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6082         }
6083 }
6084
6085 static int hclge_ae_start(struct hnae3_handle *handle)
6086 {
6087         struct hclge_vport *vport = hclge_get_vport(handle);
6088         struct hclge_dev *hdev = vport->back;
6089
6090         /* mac enable */
6091         hclge_cfg_mac_mode(hdev, true);
6092         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6093         hdev->hw.mac.link = 0;
6094
6095         /* reset tqp stats */
6096         hclge_reset_tqp_stats(handle);
6097
6098         hclge_mac_start_phy(hdev);
6099
6100         return 0;
6101 }
6102
6103 static void hclge_ae_stop(struct hnae3_handle *handle)
6104 {
6105         struct hclge_vport *vport = hclge_get_vport(handle);
6106         struct hclge_dev *hdev = vport->back;
6107         int i;
6108
6109         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6110
6111         hclge_clear_arfs_rules(handle);
6112
6113         /* If it is not a PF reset, the firmware will disable the MAC,
6114          * so it only needs to stop the phy here.
6115          */
6116         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6117             hdev->reset_type != HNAE3_FUNC_RESET) {
6118                 hclge_mac_stop_phy(hdev);
6119                 return;
6120         }
6121
6122         for (i = 0; i < handle->kinfo.num_tqps; i++)
6123                 hclge_reset_tqp(handle, i);
6124
6125         /* Mac disable */
6126         hclge_cfg_mac_mode(hdev, false);
6127
6128         hclge_mac_stop_phy(hdev);
6129
6130         /* reset tqp stats */
6131         hclge_reset_tqp_stats(handle);
6132         hclge_update_link_status(hdev);
6133 }
6134
6135 int hclge_vport_start(struct hclge_vport *vport)
6136 {
6137         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6138         vport->last_active_jiffies = jiffies;
6139         return 0;
6140 }
6141
6142 void hclge_vport_stop(struct hclge_vport *vport)
6143 {
6144         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6145 }
6146
6147 static int hclge_client_start(struct hnae3_handle *handle)
6148 {
6149         struct hclge_vport *vport = hclge_get_vport(handle);
6150
6151         return hclge_vport_start(vport);
6152 }
6153
6154 static void hclge_client_stop(struct hnae3_handle *handle)
6155 {
6156         struct hclge_vport *vport = hclge_get_vport(handle);
6157
6158         hclge_vport_stop(vport);
6159 }
6160
6161 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6162                                          u16 cmdq_resp, u8  resp_code,
6163                                          enum hclge_mac_vlan_tbl_opcode op)
6164 {
6165         struct hclge_dev *hdev = vport->back;
6166         int return_status = -EIO;
6167
6168         if (cmdq_resp) {
6169                 dev_err(&hdev->pdev->dev,
6170                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6171                         cmdq_resp);
6172                 return -EIO;
6173         }
6174
6175         if (op == HCLGE_MAC_VLAN_ADD) {
6176                 if ((!resp_code) || (resp_code == 1)) {
6177                         return_status = 0;
6178                 } else if (resp_code == 2) {
6179                         return_status = -ENOSPC;
6180                         dev_err(&hdev->pdev->dev,
6181                                 "add mac addr failed for uc_overflow.\n");
6182                 } else if (resp_code == 3) {
6183                         return_status = -ENOSPC;
6184                         dev_err(&hdev->pdev->dev,
6185                                 "add mac addr failed for mc_overflow.\n");
6186                 } else {
6187                         dev_err(&hdev->pdev->dev,
6188                                 "add mac addr failed for undefined, code=%d.\n",
6189                                 resp_code);
6190                 }
6191         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6192                 if (!resp_code) {
6193                         return_status = 0;
6194                 } else if (resp_code == 1) {
6195                         return_status = -ENOENT;
6196                         dev_dbg(&hdev->pdev->dev,
6197                                 "remove mac addr failed for miss.\n");
6198                 } else {
6199                         dev_err(&hdev->pdev->dev,
6200                                 "remove mac addr failed for undefined, code=%d.\n",
6201                                 resp_code);
6202                 }
6203         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6204                 if (!resp_code) {
6205                         return_status = 0;
6206                 } else if (resp_code == 1) {
6207                         return_status = -ENOENT;
6208                         dev_dbg(&hdev->pdev->dev,
6209                                 "lookup mac addr failed for miss.\n");
6210                 } else {
6211                         dev_err(&hdev->pdev->dev,
6212                                 "lookup mac addr failed for undefined, code=%d.\n",
6213                                 resp_code);
6214                 }
6215         } else {
6216                 return_status = -EINVAL;
6217                 dev_err(&hdev->pdev->dev,
6218                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6219                         op);
6220         }
6221
6222         return return_status;
6223 }
6224
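/* Set or clear the bit for 'vfid' in the function bitmap carried by a
 * (multicast) mac_vlan table entry: functions 0-191 live in desc[1].data,
 * functions 192-255 in desc[2].data.
 */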
6225 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6226 {
6227         int word_num;
6228         int bit_num;
6229
6230         if (vfid > 255 || vfid < 0)
6231                 return -EIO;
6232
6233         if (vfid >= 0 && vfid <= 191) {
6234                 word_num = vfid / 32;
6235                 bit_num  = vfid % 32;
6236                 if (clr)
6237                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6238                 else
6239                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6240         } else {
6241                 word_num = (vfid - 192) / 32;
6242                 bit_num  = vfid % 32;
6243                 if (clr)
6244                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6245                 else
6246                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6247         }
6248
6249         return 0;
6250 }
6251
6252 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6253 {
6254 #define HCLGE_DESC_NUMBER 3
6255 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6256         int i, j;
6257
6258         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6259                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6260                         if (desc[i].data[j])
6261                                 return false;
6262
6263         return true;
6264 }
6265
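/* Pack a 6-byte MAC address into a mac_vlan table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16. For multicast addresses
 * the entry type and mc enable bits are set as well.
 */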
6266 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6267                                    const u8 *addr, bool is_mc)
6268 {
6269         const unsigned char *mac_addr = addr;
6270         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6271                        (mac_addr[0]) | (mac_addr[1] << 8);
6272         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6273
6274         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6275         if (is_mc) {
6276                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6277                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6278         }
6279
6280         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6281         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6282 }
6283
6284 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6285                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6286 {
6287         struct hclge_dev *hdev = vport->back;
6288         struct hclge_desc desc;
6289         u8 resp_code;
6290         u16 retval;
6291         int ret;
6292
6293         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6294
6295         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6296
6297         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6298         if (ret) {
6299                 dev_err(&hdev->pdev->dev,
6300                         "del mac addr failed for cmd_send, ret =%d.\n",
6301                         ret);
6302                 return ret;
6303         }
6304         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6305         retval = le16_to_cpu(desc.retval);
6306
6307         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6308                                              HCLGE_MAC_VLAN_REMOVE);
6309 }
6310
6311 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6312                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6313                                      struct hclge_desc *desc,
6314                                      bool is_mc)
6315 {
6316         struct hclge_dev *hdev = vport->back;
6317         u8 resp_code;
6318         u16 retval;
6319         int ret;
6320
6321         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6322         if (is_mc) {
6323                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6324                 memcpy(desc[0].data,
6325                        req,
6326                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6327                 hclge_cmd_setup_basic_desc(&desc[1],
6328                                            HCLGE_OPC_MAC_VLAN_ADD,
6329                                            true);
6330                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6331                 hclge_cmd_setup_basic_desc(&desc[2],
6332                                            HCLGE_OPC_MAC_VLAN_ADD,
6333                                            true);
6334                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6335         } else {
6336                 memcpy(desc[0].data,
6337                        req,
6338                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6339                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6340         }
6341         if (ret) {
6342                 dev_err(&hdev->pdev->dev,
6343                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6344                         ret);
6345                 return ret;
6346         }
6347         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6348         retval = le16_to_cpu(desc[0].retval);
6349
6350         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6351                                              HCLGE_MAC_VLAN_LKUP);
6352 }
6353
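/* Write a mac_vlan table entry: a unicast entry fits in one command
 * descriptor, while a multicast entry reuses the three chained descriptors
 * in mc_desc (which also carry the function bitmap). The firmware response
 * code is translated by hclge_get_mac_vlan_cmd_status().
 */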
6354 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6355                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6356                                   struct hclge_desc *mc_desc)
6357 {
6358         struct hclge_dev *hdev = vport->back;
6359         int cfg_status;
6360         u8 resp_code;
6361         u16 retval;
6362         int ret;
6363
6364         if (!mc_desc) {
6365                 struct hclge_desc desc;
6366
6367                 hclge_cmd_setup_basic_desc(&desc,
6368                                            HCLGE_OPC_MAC_VLAN_ADD,
6369                                            false);
6370                 memcpy(desc.data, req,
6371                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6372                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6373                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6374                 retval = le16_to_cpu(desc.retval);
6375
6376                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6377                                                            resp_code,
6378                                                            HCLGE_MAC_VLAN_ADD);
6379         } else {
6380                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6381                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6382                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6383                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6384                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6385                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6386                 memcpy(mc_desc[0].data, req,
6387                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6388                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6389                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6390                 retval = le16_to_cpu(mc_desc[0].retval);
6391
6392                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6393                                                            resp_code,
6394                                                            HCLGE_MAC_VLAN_ADD);
6395         }
6396
6397         if (ret) {
6398                 dev_err(&hdev->pdev->dev,
6399                         "add mac addr failed for cmd_send, ret =%d.\n",
6400                         ret);
6401                 return ret;
6402         }
6403
6404         return cfg_status;
6405 }
6406
6407 static int hclge_init_umv_space(struct hclge_dev *hdev)
6408 {
6409         u16 allocated_size = 0;
6410         int ret;
6411
6412         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6413                                   true);
6414         if (ret)
6415                 return ret;
6416
6417         if (allocated_size < hdev->wanted_umv_size)
6418                 dev_warn(&hdev->pdev->dev,
6419                          "Alloc umv space failed, want %d, get %d\n",
6420                          hdev->wanted_umv_size, allocated_size);
6421
6422         mutex_init(&hdev->umv_mutex);
6423         hdev->max_umv_size = allocated_size;
6424         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6425         hdev->share_umv_size = hdev->priv_umv_size +
6426                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6427
6428         return 0;
6429 }
6430
6431 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6432 {
6433         int ret;
6434
6435         if (hdev->max_umv_size > 0) {
6436                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6437                                           false);
6438                 if (ret)
6439                         return ret;
6440                 hdev->max_umv_size = 0;
6441         }
6442         mutex_destroy(&hdev->umv_mutex);
6443
6444         return 0;
6445 }
6446
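/* Allocate or free unicast MAC VLAN (UMV) table space for this PF through
 * the HCLGE_OPC_MAC_VLAN_ALLOCATE command. On allocation the firmware
 * returns the size actually granted in desc.data[1], which may be smaller
 * than the requested space_size.
 */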
6447 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6448                                u16 *allocated_size, bool is_alloc)
6449 {
6450         struct hclge_umv_spc_alc_cmd *req;
6451         struct hclge_desc desc;
6452         int ret;
6453
6454         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6455         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6456         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6457         req->space_size = cpu_to_le32(space_size);
6458
6459         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6460         if (ret) {
6461                 dev_err(&hdev->pdev->dev,
6462                         "%s umv space failed for cmd_send, ret =%d\n",
6463                         is_alloc ? "allocate" : "free", ret);
6464                 return ret;
6465         }
6466
6467         if (is_alloc && allocated_size)
6468                 *allocated_size = le32_to_cpu(desc.data[1]);
6469
6470         return 0;
6471 }
6472
6473 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6474 {
6475         struct hclge_vport *vport;
6476         int i;
6477
6478         for (i = 0; i < hdev->num_alloc_vport; i++) {
6479                 vport = &hdev->vport[i];
6480                 vport->used_umv_num = 0;
6481         }
6482
6483         mutex_lock(&hdev->umv_mutex);
6484         hdev->share_umv_size = hdev->priv_umv_size +
6485                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6486         mutex_unlock(&hdev->umv_mutex);
6487 }
6488
6489 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6490 {
6491         struct hclge_dev *hdev = vport->back;
6492         bool is_full;
6493
6494         mutex_lock(&hdev->umv_mutex);
6495         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6496                    hdev->share_umv_size == 0);
6497         mutex_unlock(&hdev->umv_mutex);
6498
6499         return is_full;
6500 }
6501
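/* Account UMV table usage when a unicast entry is added or removed: a vport
 * first consumes its private quota (priv_umv_size) and only then draws from
 * the shared pool (share_umv_size); freeing reverses the accounting. All
 * updates are serialized by umv_mutex.
 */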
6502 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6503 {
6504         struct hclge_dev *hdev = vport->back;
6505
6506         mutex_lock(&hdev->umv_mutex);
6507         if (is_free) {
6508                 if (vport->used_umv_num > hdev->priv_umv_size)
6509                         hdev->share_umv_size++;
6510
6511                 if (vport->used_umv_num > 0)
6512                         vport->used_umv_num--;
6513         } else {
6514                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6515                     hdev->share_umv_size > 0)
6516                         hdev->share_umv_size--;
6517                 vport->used_umv_num++;
6518         }
6519         mutex_unlock(&hdev->umv_mutex);
6520 }
6521
6522 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6523                              const unsigned char *addr)
6524 {
6525         struct hclge_vport *vport = hclge_get_vport(handle);
6526
6527         return hclge_add_uc_addr_common(vport, addr);
6528 }
6529
6530 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6531                              const unsigned char *addr)
6532 {
6533         struct hclge_dev *hdev = vport->back;
6534         struct hclge_mac_vlan_tbl_entry_cmd req;
6535         struct hclge_desc desc;
6536         u16 egress_port = 0;
6537         int ret;
6538
6539         /* mac addr check */
6540         if (is_zero_ether_addr(addr) ||
6541             is_broadcast_ether_addr(addr) ||
6542             is_multicast_ether_addr(addr)) {
6543                 dev_err(&hdev->pdev->dev,
6544                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6545                          addr,
6546                          is_zero_ether_addr(addr),
6547                          is_broadcast_ether_addr(addr),
6548                          is_multicast_ether_addr(addr));
6549                 return -EINVAL;
6550         }
6551
6552         memset(&req, 0, sizeof(req));
6553
6554         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6555                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6556
6557         req.egress_port = cpu_to_le16(egress_port);
6558
6559         hclge_prepare_mac_addr(&req, addr, false);
6560
6561         /* Lookup the mac address in the mac_vlan table, and add
6562          * it if the entry does not exist. Duplicate unicast entries
6563          * are not allowed in the mac vlan table.
6564          */
6565         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6566         if (ret == -ENOENT) {
6567                 if (!hclge_is_umv_space_full(vport)) {
6568                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6569                         if (!ret)
6570                                 hclge_update_umv_space(vport, false);
6571                         return ret;
6572                 }
6573
6574                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6575                         hdev->priv_umv_size);
6576
6577                 return -ENOSPC;
6578         }
6579
6580         /* check if we just hit the duplicate */
6581         if (!ret) {
6582                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6583                          vport->vport_id, addr);
6584                 return 0;
6585         }
6586
6587         dev_err(&hdev->pdev->dev,
6588                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6589                 addr);
6590
6591         return ret;
6592 }
6593
6594 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6595                             const unsigned char *addr)
6596 {
6597         struct hclge_vport *vport = hclge_get_vport(handle);
6598
6599         return hclge_rm_uc_addr_common(vport, addr);
6600 }
6601
6602 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6603                             const unsigned char *addr)
6604 {
6605         struct hclge_dev *hdev = vport->back;
6606         struct hclge_mac_vlan_tbl_entry_cmd req;
6607         int ret;
6608
6609         /* mac addr check */
6610         if (is_zero_ether_addr(addr) ||
6611             is_broadcast_ether_addr(addr) ||
6612             is_multicast_ether_addr(addr)) {
6613                 dev_dbg(&hdev->pdev->dev,
6614                         "Remove mac err! invalid mac:%pM.\n",
6615                          addr);
6616                 return -EINVAL;
6617         }
6618
6619         memset(&req, 0, sizeof(req));
6620         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6621         hclge_prepare_mac_addr(&req, addr, false);
6622         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6623         if (!ret)
6624                 hclge_update_umv_space(vport, true);
6625
6626         return ret;
6627 }
6628
6629 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6630                              const unsigned char *addr)
6631 {
6632         struct hclge_vport *vport = hclge_get_vport(handle);
6633
6634         return hclge_add_mc_addr_common(vport, addr);
6635 }
6636
6637 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6638                              const unsigned char *addr)
6639 {
6640         struct hclge_dev *hdev = vport->back;
6641         struct hclge_mac_vlan_tbl_entry_cmd req;
6642         struct hclge_desc desc[3];
6643         int status;
6644
6645         /* mac addr check */
6646         if (!is_multicast_ether_addr(addr)) {
6647                 dev_err(&hdev->pdev->dev,
6648                         "Add mc mac err! invalid mac:%pM.\n",
6649                          addr);
6650                 return -EINVAL;
6651         }
6652         memset(&req, 0, sizeof(req));
6653         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6654         hclge_prepare_mac_addr(&req, addr, true);
6655         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6656         if (!status) {
6657                 /* This mac addr exists, update VFID for it */
6658                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6659                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6660         } else {
6661                 /* This mac addr does not exist, add a new entry for it */
6662                 memset(desc[0].data, 0, sizeof(desc[0].data));
6663                 memset(desc[1].data, 0, sizeof(desc[0].data));
6664                 memset(desc[2].data, 0, sizeof(desc[0].data));
6665                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6666                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6667         }
6668
6669         if (status == -ENOSPC)
6670                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6671
6672         return status;
6673 }
6674
6675 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6676                             const unsigned char *addr)
6677 {
6678         struct hclge_vport *vport = hclge_get_vport(handle);
6679
6680         return hclge_rm_mc_addr_common(vport, addr);
6681 }
6682
6683 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6684                             const unsigned char *addr)
6685 {
6686         struct hclge_dev *hdev = vport->back;
6687         struct hclge_mac_vlan_tbl_entry_cmd req;
6688         enum hclge_cmd_status status;
6689         struct hclge_desc desc[3];
6690
6691         /* mac addr check */
6692         if (!is_multicast_ether_addr(addr)) {
6693                 dev_dbg(&hdev->pdev->dev,
6694                         "Remove mc mac err! invalid mac:%pM.\n",
6695                          addr);
6696                 return -EINVAL;
6697         }
6698
6699         memset(&req, 0, sizeof(req));
6700         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6701         hclge_prepare_mac_addr(&req, addr, true);
6702         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6703         if (!status) {
6704                 /* This mac addr exists, remove this handle's VFID for it */
6705                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6706
6707                 if (hclge_is_all_function_id_zero(desc))
6708                         /* All the vfids are zero, so this entry needs to be deleted */
6709                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6710                 else
6711                         /* Not all the vfids are zero, update the vfid */
6712                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6713
6714         } else {
6715                 /* Maybe this mac address is in the mta table, but it cannot
6716                  * be deleted here because an mta entry represents an address
6717                  * range rather than a specific address. The delete action for
6718                  * all entries will take effect in update_mta_status, called by
6719                  * hns3_nic_set_rx_mode.
6720                  */
6721                 status = 0;
6722         }
6723
6724         return status;
6725 }
6726
6727 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6728                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6729 {
6730         struct hclge_vport_mac_addr_cfg *mac_cfg;
6731         struct list_head *list;
6732
6733         if (!vport->vport_id)
6734                 return;
6735
6736         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6737         if (!mac_cfg)
6738                 return;
6739
6740         mac_cfg->hd_tbl_status = true;
6741         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6742
6743         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6744                &vport->uc_mac_list : &vport->mc_mac_list;
6745
6746         list_add_tail(&mac_cfg->node, list);
6747 }
6748
6749 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6750                               bool is_write_tbl,
6751                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6752 {
6753         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6754         struct list_head *list;
6755         bool uc_flag, mc_flag;
6756
6757         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6758                &vport->uc_mac_list : &vport->mc_mac_list;
6759
6760         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6761         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6762
6763         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6764                 if (!memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN)) {
6765                         if (uc_flag && mac_cfg->hd_tbl_status)
6766                                 hclge_rm_uc_addr_common(vport, mac_addr);
6767
6768                         if (mc_flag && mac_cfg->hd_tbl_status)
6769                                 hclge_rm_mc_addr_common(vport, mac_addr);
6770
6771                         list_del(&mac_cfg->node);
6772                         kfree(mac_cfg);
6773                         break;
6774                 }
6775         }
6776 }
6777
6778 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6779                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6780 {
6781         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6782         struct list_head *list;
6783
6784         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6785                &vport->uc_mac_list : &vport->mc_mac_list;
6786
6787         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6788                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6789                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6790
6791                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6792                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6793
6794                 mac_cfg->hd_tbl_status = false;
6795                 if (is_del_list) {
6796                         list_del(&mac_cfg->node);
6797                         kfree(mac_cfg);
6798                 }
6799         }
6800 }
6801
6802 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6803 {
6804         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6805         struct hclge_vport *vport;
6806         int i;
6807
6808         mutex_lock(&hdev->vport_cfg_mutex);
6809         for (i = 0; i < hdev->num_alloc_vport; i++) {
6810                 vport = &hdev->vport[i];
6811                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6812                         list_del(&mac->node);
6813                         kfree(mac);
6814                 }
6815
6816                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6817                         list_del(&mac->node);
6818                         kfree(mac);
6819                 }
6820         }
6821         mutex_unlock(&hdev->vport_cfg_mutex);
6822 }
6823
6824 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6825                                               u16 cmdq_resp, u8 resp_code)
6826 {
6827 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6828 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6829 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6830 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6831
6832         int return_status;
6833
6834         if (cmdq_resp) {
6835                 dev_err(&hdev->pdev->dev,
6836                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6837                         cmdq_resp);
6838                 return -EIO;
6839         }
6840
6841         switch (resp_code) {
6842         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6843         case HCLGE_ETHERTYPE_ALREADY_ADD:
6844                 return_status = 0;
6845                 break;
6846         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6847                 dev_err(&hdev->pdev->dev,
6848                         "add mac ethertype failed for manager table overflow.\n");
6849                 return_status = -EIO;
6850                 break;
6851         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6852                 dev_err(&hdev->pdev->dev,
6853                         "add mac ethertype failed for key conflict.\n");
6854                 return_status = -EIO;
6855                 break;
6856         default:
6857                 dev_err(&hdev->pdev->dev,
6858                         "add mac ethertype failed for undefined, code=%d.\n",
6859                         resp_code);
6860                 return_status = -EIO;
6861         }
6862
6863         return return_status;
6864 }
6865
6866 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6867                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6868 {
6869         struct hclge_desc desc;
6870         u8 resp_code;
6871         u16 retval;
6872         int ret;
6873
6874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6875         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6876
6877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6878         if (ret) {
6879                 dev_err(&hdev->pdev->dev,
6880                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6881                         ret);
6882                 return ret;
6883         }
6884
6885         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6886         retval = le16_to_cpu(desc.retval);
6887
6888         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6889 }
6890
6891 static int init_mgr_tbl(struct hclge_dev *hdev)
6892 {
6893         int ret;
6894         int i;
6895
6896         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6897                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6898                 if (ret) {
6899                         dev_err(&hdev->pdev->dev,
6900                                 "add mac ethertype failed, ret =%d.\n",
6901                                 ret);
6902                         return ret;
6903                 }
6904         }
6905
6906         return 0;
6907 }
6908
6909 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6910 {
6911         struct hclge_vport *vport = hclge_get_vport(handle);
6912         struct hclge_dev *hdev = vport->back;
6913
6914         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6915 }
6916
6917 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6918                               bool is_first)
6919 {
6920         const unsigned char *new_addr = (const unsigned char *)p;
6921         struct hclge_vport *vport = hclge_get_vport(handle);
6922         struct hclge_dev *hdev = vport->back;
6923         int ret;
6924
6925         /* mac addr check */
6926         if (is_zero_ether_addr(new_addr) ||
6927             is_broadcast_ether_addr(new_addr) ||
6928             is_multicast_ether_addr(new_addr)) {
6929                 dev_err(&hdev->pdev->dev,
6930                         "Change uc mac err! invalid mac:%pM.\n",
6931                          new_addr);
6932                 return -EINVAL;
6933         }
6934
6935         if ((!is_first || is_kdump_kernel()) &&
6936             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6937                 dev_warn(&hdev->pdev->dev,
6938                          "remove old uc mac address fail.\n");
6939
6940         ret = hclge_add_uc_addr(handle, new_addr);
6941         if (ret) {
6942                 dev_err(&hdev->pdev->dev,
6943                         "add uc mac address fail, ret =%d.\n",
6944                         ret);
6945
6946                 if (!is_first &&
6947                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6948                         dev_err(&hdev->pdev->dev,
6949                                 "restore uc mac address fail.\n");
6950
6951                 return -EIO;
6952         }
6953
6954         ret = hclge_pause_addr_cfg(hdev, new_addr);
6955         if (ret) {
6956                 dev_err(&hdev->pdev->dev,
6957                         "configure mac pause address fail, ret =%d.\n",
6958                         ret);
6959                 return -EIO;
6960         }
6961
6962         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6963
6964         return 0;
6965 }
6966
6967 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6968                           int cmd)
6969 {
6970         struct hclge_vport *vport = hclge_get_vport(handle);
6971         struct hclge_dev *hdev = vport->back;
6972
6973         if (!hdev->hw.mac.phydev)
6974                 return -EOPNOTSUPP;
6975
6976         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6977 }
6978
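/* Enable or disable the filter-entry bits given by fe_type for one VLAN
 * filter type (VF or port) on the given vf_id.
 */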
6979 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6980                                       u8 fe_type, bool filter_en, u8 vf_id)
6981 {
6982         struct hclge_vlan_filter_ctrl_cmd *req;
6983         struct hclge_desc desc;
6984         int ret;
6985
6986         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6987
6988         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6989         req->vlan_type = vlan_type;
6990         req->vlan_fe = filter_en ? fe_type : 0;
6991         req->vf_id = vf_id;
6992
6993         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6994         if (ret)
6995                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6996                         ret);
6997
6998         return ret;
6999 }
7000
7001 #define HCLGE_FILTER_TYPE_VF            0
7002 #define HCLGE_FILTER_TYPE_PORT          1
7003 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7004 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7005 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7006 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7007 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7008 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7009                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7010 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7011                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7012
7013 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7014 {
7015         struct hclge_vport *vport = hclge_get_vport(handle);
7016         struct hclge_dev *hdev = vport->back;
7017
7018         if (hdev->pdev->revision >= 0x21) {
7019                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7020                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7021                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7022                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7023         } else {
7024                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7025                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7026                                            0);
7027         }
7028         if (enable)
7029                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7030         else
7031                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7032 }
7033
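/* Add (is_kill == false) or remove (is_kill == true) one entry in the VF
 * VLAN filter table. The VF bitmap spans two descriptors of
 * HCLGE_MAX_VF_BYTES each; "table full" on add and "entry not found" on
 * remove are reported by firmware but treated as non-fatal here.
 */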
7034 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7035                                     bool is_kill, u16 vlan, u8 qos,
7036                                     __be16 proto)
7037 {
7038 #define HCLGE_MAX_VF_BYTES  16
7039         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7040         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7041         struct hclge_desc desc[2];
7042         u8 vf_byte_val;
7043         u8 vf_byte_off;
7044         int ret;
7045
7046         hclge_cmd_setup_basic_desc(&desc[0],
7047                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7048         hclge_cmd_setup_basic_desc(&desc[1],
7049                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7050
7051         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7052
7053         vf_byte_off = vfid / 8;
7054         vf_byte_val = 1 << (vfid % 8);
7055
7056         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7057         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7058
7059         req0->vlan_id  = cpu_to_le16(vlan);
7060         req0->vlan_cfg = is_kill;
7061
7062         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7063                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7064         else
7065                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7066
7067         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7068         if (ret) {
7069                 dev_err(&hdev->pdev->dev,
7070                         "Send vf vlan command fail, ret =%d.\n",
7071                         ret);
7072                 return ret;
7073         }
7074
7075         if (!is_kill) {
7076 #define HCLGE_VF_VLAN_NO_ENTRY  2
7077                 if (!req0->resp_code || req0->resp_code == 1)
7078                         return 0;
7079
7080                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7081                         dev_warn(&hdev->pdev->dev,
7082                                  "vf vlan table is full, vf vlan filter is disabled\n");
7083                         return 0;
7084                 }
7085
7086                 dev_err(&hdev->pdev->dev,
7087                         "Add vf vlan filter fail, ret =%d.\n",
7088                         req0->resp_code);
7089         } else {
7090 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7091                 if (!req0->resp_code)
7092                         return 0;
7093
7094                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7095                         dev_warn(&hdev->pdev->dev,
7096                                  "vlan %d filter is not in vf vlan table\n",
7097                                  vlan);
7098                         return 0;
7099                 }
7100
7101                 dev_err(&hdev->pdev->dev,
7102                         "Kill vf vlan filter fail, ret =%d.\n",
7103                         req0->resp_code);
7104         }
7105
7106         return -EIO;
7107 }
7108
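/* Set or clear the port VLAN filter bit for vlan_id. The id is encoded
 * as a 160-VLAN block offset plus a byte/bit offset inside that block.
 */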
7109 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7110                                       u16 vlan_id, bool is_kill)
7111 {
7112         struct hclge_vlan_filter_pf_cfg_cmd *req;
7113         struct hclge_desc desc;
7114         u8 vlan_offset_byte_val;
7115         u8 vlan_offset_byte;
7116         u8 vlan_offset_160;
7117         int ret;
7118
7119         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7120
7121         vlan_offset_160 = vlan_id / 160;
7122         vlan_offset_byte = (vlan_id % 160) / 8;
7123         vlan_offset_byte_val = 1 << (vlan_id % 8);
7124
7125         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7126         req->vlan_offset = vlan_offset_160;
7127         req->vlan_cfg = is_kill;
7128         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7129
7130         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7131         if (ret)
7132                 dev_err(&hdev->pdev->dev,
7133                         "port vlan command, send fail, ret =%d.\n", ret);
7134         return ret;
7135 }
7136
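/* Program both the VF and the port VLAN filters for one vport. The port
 * filter entry is only written when the first vport joins or the last
 * vport leaves the VLAN, tracked through hdev->vlan_table.
 */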
7137 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7138                                     u16 vport_id, u16 vlan_id, u8 qos,
7139                                     bool is_kill)
7140 {
7141         u16 vport_idx, vport_num = 0;
7142         int ret;
7143
7144         if (is_kill && !vlan_id)
7145                 return 0;
7146
7147         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7148                                        0, proto);
7149         if (ret) {
7150                 dev_err(&hdev->pdev->dev,
7151                         "Set %d vport vlan filter config fail, ret =%d.\n",
7152                         vport_id, ret);
7153                 return ret;
7154         }
7155
7156         /* vlan 0 may be added twice when 8021q module is enabled */
7157         if (!is_kill && !vlan_id &&
7158             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7159                 return 0;
7160
7161         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7162                 dev_err(&hdev->pdev->dev,
7163                         "Add port vlan failed, vport %d is already in vlan %d\n",
7164                         vport_id, vlan_id);
7165                 return -EINVAL;
7166         }
7167
7168         if (is_kill &&
7169             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7170                 dev_err(&hdev->pdev->dev,
7171                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7172                         vport_id, vlan_id);
7173                 return -EINVAL;
7174         }
7175
7176         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7177                 vport_num++;
7178
7179         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7180                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7181                                                  is_kill);
7182
7183         return ret;
7184 }
7185
7186 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7187 {
7188         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7189         struct hclge_vport_vtag_tx_cfg_cmd *req;
7190         struct hclge_dev *hdev = vport->back;
7191         struct hclge_desc desc;
7192         int status;
7193
7194         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7195
7196         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7197         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7198         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7199         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7200                       vcfg->accept_tag1 ? 1 : 0);
7201         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7202                       vcfg->accept_untag1 ? 1 : 0);
7203         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7204                       vcfg->accept_tag2 ? 1 : 0);
7205         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7206                       vcfg->accept_untag2 ? 1 : 0);
7207         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7208                       vcfg->insert_tag1_en ? 1 : 0);
7209         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7210                       vcfg->insert_tag2_en ? 1 : 0);
7211         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7212
7213         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7214         req->vf_bitmap[req->vf_offset] =
7215                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7216
7217         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7218         if (status)
7219                 dev_err(&hdev->pdev->dev,
7220                         "Send port txvlan cfg command fail, ret =%d\n",
7221                         status);
7222
7223         return status;
7224 }
7225
7226 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7227 {
7228         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7229         struct hclge_vport_vtag_rx_cfg_cmd *req;
7230         struct hclge_dev *hdev = vport->back;
7231         struct hclge_desc desc;
7232         int status;
7233
7234         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7235
7236         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7237         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7238                       vcfg->strip_tag1_en ? 1 : 0);
7239         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7240                       vcfg->strip_tag2_en ? 1 : 0);
7241         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7242                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7243         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7244                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7245
7246         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7247         req->vf_bitmap[req->vf_offset] =
7248                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7249
7250         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7251         if (status)
7252                 dev_err(&hdev->pdev->dev,
7253                         "Send port rxvlan cfg command fail, ret =%d\n",
7254                         status);
7255
7256         return status;
7257 }
7258
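/* Derive the vport's TX tag insertion/acceptance and RX tag stripping
 * settings from the port based VLAN state and tag, then program them
 * to hardware.
 */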
7259 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7260                                   u16 port_base_vlan_state,
7261                                   u16 vlan_tag)
7262 {
7263         int ret;
7264
7265         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7266                 vport->txvlan_cfg.accept_tag1 = true;
7267                 vport->txvlan_cfg.insert_tag1_en = false;
7268                 vport->txvlan_cfg.default_tag1 = 0;
7269         } else {
7270                 vport->txvlan_cfg.accept_tag1 = false;
7271                 vport->txvlan_cfg.insert_tag1_en = true;
7272                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7273         }
7274
7275         vport->txvlan_cfg.accept_untag1 = true;
7276
7277         /* accept_tag2 and accept_untag2 are not supported on
7278          * pdev revision(0x20); newer revisions support them, but
7279          * these two fields cannot be configured by the user.
7280          */
7281         vport->txvlan_cfg.accept_tag2 = true;
7282         vport->txvlan_cfg.accept_untag2 = true;
7283         vport->txvlan_cfg.insert_tag2_en = false;
7284         vport->txvlan_cfg.default_tag2 = 0;
7285
7286         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7287                 vport->rxvlan_cfg.strip_tag1_en = false;
7288                 vport->rxvlan_cfg.strip_tag2_en =
7289                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7290         } else {
7291                 vport->rxvlan_cfg.strip_tag1_en =
7292                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7293                 vport->rxvlan_cfg.strip_tag2_en = true;
7294         }
7295         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7296         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7297
7298         ret = hclge_set_vlan_tx_offload_cfg(vport);
7299         if (ret)
7300                 return ret;
7301
7302         return hclge_set_vlan_rx_offload_cfg(vport);
7303 }
7304
7305 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7306 {
7307         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7308         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7309         struct hclge_desc desc;
7310         int status;
7311
7312         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7313         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7314         rx_req->ot_fst_vlan_type =
7315                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7316         rx_req->ot_sec_vlan_type =
7317                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7318         rx_req->in_fst_vlan_type =
7319                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7320         rx_req->in_sec_vlan_type =
7321                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7322
7323         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7324         if (status) {
7325                 dev_err(&hdev->pdev->dev,
7326                         "Send rxvlan protocol type command fail, ret =%d\n",
7327                         status);
7328                 return status;
7329         }
7330
7331         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7332
7333         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7334         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7335         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7336
7337         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7338         if (status)
7339                 dev_err(&hdev->pdev->dev,
7340                         "Send txvlan protocol type command fail, ret =%d\n",
7341                         status);
7342
7343         return status;
7344 }
7345
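/* Set up the default VLAN configuration: enable egress/ingress VLAN
 * filtering (per function on revision 0x21 and later), program the
 * default 0x8100 VLAN protocol types, apply each vport's offload
 * configuration and finally add VLAN 0 for the PF handle.
 */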
7346 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7347 {
7348 #define HCLGE_DEF_VLAN_TYPE             0x8100
7349
7350         struct hnae3_handle *handle = &hdev->vport[0].nic;
7351         struct hclge_vport *vport;
7352         int ret;
7353         int i;
7354
7355         if (hdev->pdev->revision >= 0x21) {
7356                 /* for revision 0x21, vf vlan filter is per function */
7357                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7358                         vport = &hdev->vport[i];
7359                         ret = hclge_set_vlan_filter_ctrl(hdev,
7360                                                          HCLGE_FILTER_TYPE_VF,
7361                                                          HCLGE_FILTER_FE_EGRESS,
7362                                                          true,
7363                                                          vport->vport_id);
7364                         if (ret)
7365                                 return ret;
7366                 }
7367
7368                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7369                                                  HCLGE_FILTER_FE_INGRESS, true,
7370                                                  0);
7371                 if (ret)
7372                         return ret;
7373         } else {
7374                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7375                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7376                                                  true, 0);
7377                 if (ret)
7378                         return ret;
7379         }
7380
7381         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7382
7383         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7384         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7385         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7386         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7387         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7388         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7389
7390         ret = hclge_set_vlan_protocol_type(hdev);
7391         if (ret)
7392                 return ret;
7393
7394         for (i = 0; i < hdev->num_alloc_vport; i++) {
7395                 u16 vlan_tag;
7396
7397                 vport = &hdev->vport[i];
7398                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7399
7400                 ret = hclge_vlan_offload_cfg(vport,
7401                                              vport->port_base_vlan_cfg.state,
7402                                              vlan_tag);
7403                 if (ret)
7404                         return ret;
7405         }
7406
7407         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7408 }
7409
7410 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7411                                        bool writen_to_tbl)
7412 {
7413         struct hclge_vport_vlan_cfg *vlan;
7414
7415         /* vlan 0 is reserved */
7416         if (!vlan_id)
7417                 return;
7418
7419         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7420         if (!vlan)
7421                 return;
7422
7423         vlan->hd_tbl_status = writen_to_tbl;
7424         vlan->vlan_id = vlan_id;
7425
7426         list_add_tail(&vlan->node, &vport->vlan_list);
7427 }
7428
7429 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7430 {
7431         struct hclge_vport_vlan_cfg *vlan, *tmp;
7432         struct hclge_dev *hdev = vport->back;
7433         int ret;
7434
7435         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7436                 if (!vlan->hd_tbl_status) {
7437                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7438                                                        vport->vport_id,
7439                                                        vlan->vlan_id, 0, false);
7440                         if (ret) {
7441                                 dev_err(&hdev->pdev->dev,
7442                                         "restore vport vlan list failed, ret=%d\n",
7443                                         ret);
7444                                 return ret;
7445                         }
7446                 }
7447                 vlan->hd_tbl_status = true;
7448         }
7449
7450         return 0;
7451 }
7452
7453 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7454                                       bool is_write_tbl)
7455 {
7456         struct hclge_vport_vlan_cfg *vlan, *tmp;
7457         struct hclge_dev *hdev = vport->back;
7458
7459         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7460                 if (vlan->vlan_id == vlan_id) {
7461                         if (is_write_tbl && vlan->hd_tbl_status)
7462                                 hclge_set_vlan_filter_hw(hdev,
7463                                                          htons(ETH_P_8021Q),
7464                                                          vport->vport_id,
7465                                                          vlan_id, 0,
7466                                                          true);
7467
7468                         list_del(&vlan->node);
7469                         kfree(vlan);
7470                         break;
7471                 }
7472         }
7473 }
7474
7475 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7476 {
7477         struct hclge_vport_vlan_cfg *vlan, *tmp;
7478         struct hclge_dev *hdev = vport->back;
7479
7480         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7481                 if (vlan->hd_tbl_status)
7482                         hclge_set_vlan_filter_hw(hdev,
7483                                                  htons(ETH_P_8021Q),
7484                                                  vport->vport_id,
7485                                                  vlan->vlan_id, 0,
7486                                                  true);
7487
7488                 vlan->hd_tbl_status = false;
7489                 if (is_del_list) {
7490                         list_del(&vlan->node);
7491                         kfree(vlan);
7492                 }
7493         }
7494 }
7495
7496 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7497 {
7498         struct hclge_vport_vlan_cfg *vlan, *tmp;
7499         struct hclge_vport *vport;
7500         int i;
7501
7502         mutex_lock(&hdev->vport_cfg_mutex);
7503         for (i = 0; i < hdev->num_alloc_vport; i++) {
7504                 vport = &hdev->vport[i];
7505                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7506                         list_del(&vlan->node);
7507                         kfree(vlan);
7508                 }
7509         }
7510         mutex_unlock(&hdev->vport_cfg_mutex);
7511 }
7512
7513 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7514 {
7515         struct hclge_vport *vport = hclge_get_vport(handle);
7516
7517         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7518                 vport->rxvlan_cfg.strip_tag1_en = false;
7519                 vport->rxvlan_cfg.strip_tag2_en = enable;
7520         } else {
7521                 vport->rxvlan_cfg.strip_tag1_en = enable;
7522                 vport->rxvlan_cfg.strip_tag2_en = true;
7523         }
7524         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7525         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7526         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7527
7528         return hclge_set_vlan_rx_offload_cfg(vport);
7529 }
7530
7531 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7532                                             u16 port_base_vlan_state,
7533                                             struct hclge_vlan_info *new_info,
7534                                             struct hclge_vlan_info *old_info)
7535 {
7536         struct hclge_dev *hdev = vport->back;
7537         int ret;
7538
7539         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7540                 hclge_rm_vport_all_vlan_table(vport, false);
7541                 return hclge_set_vlan_filter_hw(hdev,
7542                                                  htons(new_info->vlan_proto),
7543                                                  vport->vport_id,
7544                                                  new_info->vlan_tag,
7545                                                  new_info->qos, false);
7546         }
7547
7548         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7549                                        vport->vport_id, old_info->vlan_tag,
7550                                        old_info->qos, true);
7551         if (ret)
7552                 return ret;
7553
7554         return hclge_add_vport_all_vlan_table(vport);
7555 }
7556
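/* Apply a new port based VLAN state and tag for a vport: reprogram the
 * VLAN offload configuration, update the hardware filter entries (or
 * restore the vport VLAN list when disabling), and record the new
 * vlan_info. The state field is only updated on enable/disable.
 */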
7557 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7558                                     struct hclge_vlan_info *vlan_info)
7559 {
7560         struct hnae3_handle *nic = &vport->nic;
7561         struct hclge_vlan_info *old_vlan_info;
7562         struct hclge_dev *hdev = vport->back;
7563         int ret;
7564
7565         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7566
7567         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7568         if (ret)
7569                 return ret;
7570
7571         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7572                 /* add new VLAN tag */
7573                 ret = hclge_set_vlan_filter_hw(hdev,
7574                                                htons(vlan_info->vlan_proto),
7575                                                vport->vport_id,
7576                                                vlan_info->vlan_tag,
7577                                                vlan_info->qos, false);
7578                 if (ret)
7579                         return ret;
7580
7581                 /* remove old VLAN tag */
7582                 ret = hclge_set_vlan_filter_hw(hdev,
7583                                                htons(old_vlan_info->vlan_proto),
7584                                                vport->vport_id,
7585                                                old_vlan_info->vlan_tag,
7586                                                old_vlan_info->qos, true);
7587                 if (ret)
7588                         return ret;
7589
7590                 goto update;
7591         }
7592
7593         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7594                                                old_vlan_info);
7595         if (ret)
7596                 return ret;
7597
7598         /* update state only when disabling/enabling port based VLAN */
7599         vport->port_base_vlan_cfg.state = state;
7600         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7601                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7602         else
7603                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7604
7605 update:
7606         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7607         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7608         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7609
7610         return 0;
7611 }
7612
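/* Map the requested VLAN tag and the current port based VLAN state to
 * the action to take: NOCHANGE, ENABLE, DISABLE or MODIFY.
 */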
7613 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7614                                           enum hnae3_port_base_vlan_state state,
7615                                           u16 vlan)
7616 {
7617         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7618                 if (!vlan)
7619                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7620                 else
7621                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7622         } else {
7623                 if (!vlan)
7624                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7625                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7626                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7627                 else
7628                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7629         }
7630 }
7631
7632 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7633                                     u16 vlan, u8 qos, __be16 proto)
7634 {
7635         struct hclge_vport *vport = hclge_get_vport(handle);
7636         struct hclge_dev *hdev = vport->back;
7637         struct hclge_vlan_info vlan_info;
7638         u16 state;
7639         int ret;
7640
7641         if (hdev->pdev->revision == 0x20)
7642                 return -EOPNOTSUPP;
7643
7644         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7645         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7646                 return -EINVAL;
7647         if (proto != htons(ETH_P_8021Q))
7648                 return -EPROTONOSUPPORT;
7649
7650         vport = &hdev->vport[vfid];
7651         state = hclge_get_port_base_vlan_state(vport,
7652                                                vport->port_base_vlan_cfg.state,
7653                                                vlan);
7654         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7655                 return 0;
7656
7657         vlan_info.vlan_tag = vlan;
7658         vlan_info.qos = qos;
7659         vlan_info.vlan_proto = ntohs(proto);
7660
7661         /* update port based VLAN for PF */
7662         if (!vfid) {
7663                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7664                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7665                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7666
7667                 return ret;
7668         }
7669
7670         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7671                 return hclge_update_port_base_vlan_cfg(vport, state,
7672                                                        &vlan_info);
7673         } else {
7674                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7675                                                         (u8)vfid, state,
7676                                                         vlan, qos,
7677                                                         ntohs(proto));
7678                 return ret;
7679         }
7680 }
7681
7682 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7683                           u16 vlan_id, bool is_kill)
7684 {
7685         struct hclge_vport *vport = hclge_get_vport(handle);
7686         struct hclge_dev *hdev = vport->back;
7687         bool writen_to_tbl = false;
7688         int ret = 0;
7689
7690         /* When port based VLAN is enabled, we use the port based VLAN as
7691          * the VLAN filter entry. In this case, we don't update the VLAN
7692          * filter table when the user adds or removes a VLAN; we just update
7693          * the vport VLAN list. The VLAN ids in the list won't be written to
7694          * the VLAN filter table until port based VLAN is disabled.
7695          */
7696         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7697                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7698                                                vlan_id, 0, is_kill);
7699                 writen_to_tbl = true;
7700         }
7701
7702         if (ret)
7703                 return ret;
7704
7705         if (is_kill)
7706                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7707         else
7708                 hclge_add_vport_vlan_table(vport, vlan_id,
7709                                            writen_to_tbl);
7710
7711         return 0;
7712 }
7713
7714 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7715 {
7716         struct hclge_config_max_frm_size_cmd *req;
7717         struct hclge_desc desc;
7718
7719         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7720
7721         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7722         req->max_frm_size = cpu_to_le16(new_mps);
7723         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7724
7725         return hclge_cmd_send(&hdev->hw, &desc, 1);
7726 }
7727
7728 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7729 {
7730         struct hclge_vport *vport = hclge_get_vport(handle);
7731
7732         return hclge_set_vport_mtu(vport, new_mtu);
7733 }
7734
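/* Change a vport's MPS: convert the MTU to a max frame size and validate
 * it. For a VF vport only the software value is updated (it must fit
 * within the PF's mps); for the PF the MAC max frame size is reprogrammed
 * and the packet buffers are reallocated, all under vport_lock.
 */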
7735 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7736 {
7737         struct hclge_dev *hdev = vport->back;
7738         int i, max_frm_size, ret = 0;
7739
7740         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7741         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7742             max_frm_size > HCLGE_MAC_MAX_FRAME)
7743                 return -EINVAL;
7744
7745         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7746         mutex_lock(&hdev->vport_lock);
7747         /* VF's mps must fit within hdev->mps */
7748         if (vport->vport_id && max_frm_size > hdev->mps) {
7749                 mutex_unlock(&hdev->vport_lock);
7750                 return -EINVAL;
7751         } else if (vport->vport_id) {
7752                 vport->mps = max_frm_size;
7753                 mutex_unlock(&hdev->vport_lock);
7754                 return 0;
7755         }
7756
7757         /* PF's mps must not be less than any VF's mps */
7758         for (i = 1; i < hdev->num_alloc_vport; i++)
7759                 if (max_frm_size < hdev->vport[i].mps) {
7760                         mutex_unlock(&hdev->vport_lock);
7761                         return -EINVAL;
7762                 }
7763
7764         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7765
7766         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7767         if (ret) {
7768                 dev_err(&hdev->pdev->dev,
7769                         "Change mtu fail, ret =%d\n", ret);
7770                 goto out;
7771         }
7772
7773         hdev->mps = max_frm_size;
7774         vport->mps = max_frm_size;
7775
7776         ret = hclge_buffer_alloc(hdev);
7777         if (ret)
7778                 dev_err(&hdev->pdev->dev,
7779                         "Allocate buffer fail, ret =%d\n", ret);
7780
7781 out:
7782         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7783         mutex_unlock(&hdev->vport_lock);
7784         return ret;
7785 }
7786
7787 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7788                                     bool enable)
7789 {
7790         struct hclge_reset_tqp_queue_cmd *req;
7791         struct hclge_desc desc;
7792         int ret;
7793
7794         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7795
7796         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7797         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7798         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7799
7800         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7801         if (ret) {
7802                 dev_err(&hdev->pdev->dev,
7803                         "Send tqp reset cmd error, status =%d\n", ret);
7804                 return ret;
7805         }
7806
7807         return 0;
7808 }
7809
7810 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7811 {
7812         struct hclge_reset_tqp_queue_cmd *req;
7813         struct hclge_desc desc;
7814         int ret;
7815
7816         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7817
7818         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7819         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7820
7821         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7822         if (ret) {
7823                 dev_err(&hdev->pdev->dev,
7824                         "Get reset status error, status =%d\n", ret);
7825                 return ret;
7826         }
7827
7828         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7829 }
7830
7831 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7832 {
7833         struct hnae3_queue *queue;
7834         struct hclge_tqp *tqp;
7835
7836         queue = handle->kinfo.tqp[queue_id];
7837         tqp = container_of(queue, struct hclge_tqp, q);
7838
7839         return tqp->index;
7840 }
7841
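/* Reset one TQP: disable the queue, assert the per-queue reset, poll the
 * reset status up to HCLGE_TQP_RESET_TRY_TIMES times and then deassert
 * the reset.
 */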
7842 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7843 {
7844         struct hclge_vport *vport = hclge_get_vport(handle);
7845         struct hclge_dev *hdev = vport->back;
7846         int reset_try_times = 0;
7847         int reset_status;
7848         u16 queue_gid;
7849         int ret = 0;
7850
7851         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7852
7853         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7854         if (ret) {
7855                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7856                 return ret;
7857         }
7858
7859         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7860         if (ret) {
7861                 dev_err(&hdev->pdev->dev,
7862                         "Send reset tqp cmd fail, ret = %d\n", ret);
7863                 return ret;
7864         }
7865
7866         reset_try_times = 0;
7867         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7868                 /* Wait for tqp hw reset */
7869                 msleep(20);
7870                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7871                 if (reset_status)
7872                         break;
7873         }
7874
7875         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7876                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7877                 return -ETIME;
7878         }
7879
7880         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7881         if (ret)
7882                 dev_err(&hdev->pdev->dev,
7883                         "Deassert the soft reset fail, ret = %d\n", ret);
7884
7885         return ret;
7886 }
7887
7888 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7889 {
7890         struct hclge_dev *hdev = vport->back;
7891         int reset_try_times = 0;
7892         int reset_status;
7893         u16 queue_gid;
7894         int ret;
7895
7896         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7897
7898         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7899         if (ret) {
7900                 dev_warn(&hdev->pdev->dev,
7901                          "Send reset tqp cmd fail, ret = %d\n", ret);
7902                 return;
7903         }
7904
7905         reset_try_times = 0;
7906         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7907                 /* Wait for tqp hw reset */
7908                 msleep(20);
7909                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7910                 if (reset_status)
7911                         break;
7912         }
7913
7914         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7915                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7916                 return;
7917         }
7918
7919         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7920         if (ret)
7921                 dev_warn(&hdev->pdev->dev,
7922                          "Deassert the soft reset fail, ret = %d\n", ret);
7923 }
7924
7925 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7926 {
7927         struct hclge_vport *vport = hclge_get_vport(handle);
7928         struct hclge_dev *hdev = vport->back;
7929
7930         return hdev->fw_version;
7931 }
7932
7933 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7934 {
7935         struct phy_device *phydev = hdev->hw.mac.phydev;
7936
7937         if (!phydev)
7938                 return;
7939
7940         phy_set_asym_pause(phydev, rx_en, tx_en);
7941 }
7942
7943 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7944 {
7945         int ret;
7946
7947         if (rx_en && tx_en)
7948                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7949         else if (rx_en && !tx_en)
7950                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7951         else if (!rx_en && tx_en)
7952                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7953         else
7954                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7955
7956         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7957                 return 0;
7958
7959         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7960         if (ret) {
7961                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7962                         ret);
7963                 return ret;
7964         }
7965
7966         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7967
7968         return 0;
7969 }
7970
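/* Resolve the pause configuration from the local and remote PHY autoneg
 * advertisement (forced off for half duplex) and apply it to the MAC.
 */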
7971 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7972 {
7973         struct phy_device *phydev = hdev->hw.mac.phydev;
7974         u16 remote_advertising = 0;
7975         u16 local_advertising = 0;
7976         u32 rx_pause, tx_pause;
7977         u8 flowctl;
7978
7979         if (!phydev->link || !phydev->autoneg)
7980                 return 0;
7981
7982         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7983
7984         if (phydev->pause)
7985                 remote_advertising = LPA_PAUSE_CAP;
7986
7987         if (phydev->asym_pause)
7988                 remote_advertising |= LPA_PAUSE_ASYM;
7989
7990         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7991                                            remote_advertising);
7992         tx_pause = flowctl & FLOW_CTRL_TX;
7993         rx_pause = flowctl & FLOW_CTRL_RX;
7994
7995         if (phydev->duplex == HCLGE_MAC_HALF) {
7996                 tx_pause = 0;
7997                 rx_pause = 0;
7998         }
7999
8000         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8001 }
8002
8003 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8004                                  u32 *rx_en, u32 *tx_en)
8005 {
8006         struct hclge_vport *vport = hclge_get_vport(handle);
8007         struct hclge_dev *hdev = vport->back;
8008
8009         *auto_neg = hclge_get_autoneg(handle);
8010
8011         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8012                 *rx_en = 0;
8013                 *tx_en = 0;
8014                 return;
8015         }
8016
8017         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8018                 *rx_en = 1;
8019                 *tx_en = 0;
8020         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8021                 *tx_en = 1;
8022                 *rx_en = 0;
8023         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8024                 *rx_en = 1;
8025                 *tx_en = 1;
8026         } else {
8027                 *rx_en = 0;
8028                 *tx_en = 0;
8029         }
8030 }
8031
8032 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8033                                 u32 rx_en, u32 tx_en)
8034 {
8035         struct hclge_vport *vport = hclge_get_vport(handle);
8036         struct hclge_dev *hdev = vport->back;
8037         struct phy_device *phydev = hdev->hw.mac.phydev;
8038         u32 fc_autoneg;
8039
8040         fc_autoneg = hclge_get_autoneg(handle);
8041         if (auto_neg != fc_autoneg) {
8042                 dev_info(&hdev->pdev->dev,
8043                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8044                 return -EOPNOTSUPP;
8045         }
8046
8047         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8048                 dev_info(&hdev->pdev->dev,
8049                          "Priority flow control enabled. Cannot set link flow control.\n");
8050                 return -EOPNOTSUPP;
8051         }
8052
8053         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8054
8055         if (!fc_autoneg)
8056                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8057
8058         if (phydev)
8059                 return phy_start_aneg(phydev);
8060
8061         if (hdev->pdev->revision == 0x20)
8062                 return -EOPNOTSUPP;
8063
8064         return hclge_restart_autoneg(handle);
8065 }
8066
8067 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8068                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8069 {
8070         struct hclge_vport *vport = hclge_get_vport(handle);
8071         struct hclge_dev *hdev = vport->back;
8072
8073         if (speed)
8074                 *speed = hdev->hw.mac.speed;
8075         if (duplex)
8076                 *duplex = hdev->hw.mac.duplex;
8077         if (auto_neg)
8078                 *auto_neg = hdev->hw.mac.autoneg;
8079 }
8080
8081 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8082                                  u8 *module_type)
8083 {
8084         struct hclge_vport *vport = hclge_get_vport(handle);
8085         struct hclge_dev *hdev = vport->back;
8086
8087         if (media_type)
8088                 *media_type = hdev->hw.mac.media_type;
8089
8090         if (module_type)
8091                 *module_type = hdev->hw.mac.module_type;
8092 }
8093
8094 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8095                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8096 {
8097         struct hclge_vport *vport = hclge_get_vport(handle);
8098         struct hclge_dev *hdev = vport->back;
8099         struct phy_device *phydev = hdev->hw.mac.phydev;
8100         int mdix_ctrl, mdix, retval, is_resolved;
8101
8102         if (!phydev) {
8103                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8104                 *tp_mdix = ETH_TP_MDI_INVALID;
8105                 return;
8106         }
8107
8108         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8109
8110         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8111         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8112                                     HCLGE_PHY_MDIX_CTRL_S);
8113
8114         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8115         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8116         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8117
8118         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8119
8120         switch (mdix_ctrl) {
8121         case 0x0:
8122                 *tp_mdix_ctrl = ETH_TP_MDI;
8123                 break;
8124         case 0x1:
8125                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8126                 break;
8127         case 0x3:
8128                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8129                 break;
8130         default:
8131                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8132                 break;
8133         }
8134
8135         if (!is_resolved)
8136                 *tp_mdix = ETH_TP_MDI_INVALID;
8137         else if (mdix)
8138                 *tp_mdix = ETH_TP_MDI_X;
8139         else
8140                 *tp_mdix = ETH_TP_MDI;
8141 }
8142
8143 static void hclge_info_show(struct hclge_dev *hdev)
8144 {
8145         struct device *dev = &hdev->pdev->dev;
8146
8147         dev_info(dev, "PF info begin:\n");
8148
8149         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8150         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8151         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8152         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8153         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8154         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8155         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8156         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8157         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8158         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8159         dev_info(dev, "This is %s PF\n",
8160                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8161         dev_info(dev, "DCB %s\n",
8162                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8163         dev_info(dev, "MQPRIO %s\n",
8164                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8165
8166         dev_info(dev, "PF info end.\n");
8167 }
8168
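/* Bind a newly registered client (KNIC, UNIC or RoCE) to every vport and
 * initialize its instances. The RoCE instance is only initialized once
 * both the NIC and RoCE clients are registered and the device supports
 * RoCE.
 */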
8169 static int hclge_init_client_instance(struct hnae3_client *client,
8170                                       struct hnae3_ae_dev *ae_dev)
8171 {
8172         struct hclge_dev *hdev = ae_dev->priv;
8173         struct hclge_vport *vport;
8174         int i, ret;
8175
8176         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8177                 vport = &hdev->vport[i];
8178
8179                 switch (client->type) {
8180                 case HNAE3_CLIENT_KNIC:
8181
8182                         hdev->nic_client = client;
8183                         vport->nic.client = client;
8184                         ret = client->ops->init_instance(&vport->nic);
8185                         if (ret)
8186                                 goto clear_nic;
8187
8188                         hnae3_set_client_init_flag(client, ae_dev, 1);
8189
8190                         if (netif_msg_drv(&hdev->vport->nic))
8191                                 hclge_info_show(hdev);
8192
8193                         if (hdev->roce_client &&
8194                             hnae3_dev_roce_supported(hdev)) {
8195                                 struct hnae3_client *rc = hdev->roce_client;
8196
8197                                 ret = hclge_init_roce_base_info(vport);
8198                                 if (ret)
8199                                         goto clear_roce;
8200
8201                                 ret = rc->ops->init_instance(&vport->roce);
8202                                 if (ret)
8203                                         goto clear_roce;
8204
8205                                 hnae3_set_client_init_flag(hdev->roce_client,
8206                                                            ae_dev, 1);
8207                         }
8208
8209                         break;
8210                 case HNAE3_CLIENT_UNIC:
8211                         hdev->nic_client = client;
8212                         vport->nic.client = client;
8213
8214                         ret = client->ops->init_instance(&vport->nic);
8215                         if (ret)
8216                                 goto clear_nic;
8217
8218                         hnae3_set_client_init_flag(client, ae_dev, 1);
8219
8220                         break;
8221                 case HNAE3_CLIENT_ROCE:
8222                         if (hnae3_dev_roce_supported(hdev)) {
8223                                 hdev->roce_client = client;
8224                                 vport->roce.client = client;
8225                         }
8226
8227                         if (hdev->roce_client && hdev->nic_client) {
8228                                 ret = hclge_init_roce_base_info(vport);
8229                                 if (ret)
8230                                         goto clear_roce;
8231
8232                                 ret = client->ops->init_instance(&vport->roce);
8233                                 if (ret)
8234                                         goto clear_roce;
8235
8236                                 hnae3_set_client_init_flag(client, ae_dev, 1);
8237                         }
8238
8239                         break;
8240                 default:
8241                         return -EINVAL;
8242                 }
8243         }
8244
8245         return 0;
8246
8247 clear_nic:
8248         hdev->nic_client = NULL;
8249         vport->nic.client = NULL;
8250         return ret;
8251 clear_roce:
8252         hdev->roce_client = NULL;
8253         vport->roce.client = NULL;
8254         return ret;
8255 }
8256
8257 static void hclge_uninit_client_instance(struct hnae3_client *client,
8258                                          struct hnae3_ae_dev *ae_dev)
8259 {
8260         struct hclge_dev *hdev = ae_dev->priv;
8261         struct hclge_vport *vport;
8262         int i;
8263
8264         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8265                 vport = &hdev->vport[i];
8266                 if (hdev->roce_client) {
8267                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8268                                                                 0);
8269                         hdev->roce_client = NULL;
8270                         vport->roce.client = NULL;
8271                 }
8272                 if (client->type == HNAE3_CLIENT_ROCE)
8273                         return;
8274                 if (hdev->nic_client && client->ops->uninit_instance) {
8275                         client->ops->uninit_instance(&vport->nic, 0);
8276                         hdev->nic_client = NULL;
8277                         vport->nic.client = NULL;
8278                 }
8279         }
8280 }
8281
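/* Enable the PCI device, set a 64-bit DMA mask (falling back to 32-bit),
 * map BAR 2 for the command/configuration registers and read the number
 * of VFs supported by the device.
 */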
8282 static int hclge_pci_init(struct hclge_dev *hdev)
8283 {
8284         struct pci_dev *pdev = hdev->pdev;
8285         struct hclge_hw *hw;
8286         int ret;
8287
8288         ret = pci_enable_device(pdev);
8289         if (ret) {
8290                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8291                 return ret;
8292         }
8293
8294         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8295         if (ret) {
8296                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8297                 if (ret) {
8298                         dev_err(&pdev->dev,
8299                                 "can't set consistent PCI DMA\n");
8300                         goto err_disable_device;
8301                 }
8302                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8303         }
8304
8305         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8306         if (ret) {
8307                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8308                 goto err_disable_device;
8309         }
8310
8311         pci_set_master(pdev);
8312         hw = &hdev->hw;
8313         hw->io_base = pcim_iomap(pdev, 2, 0);
8314         if (!hw->io_base) {
8315                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8316                 ret = -ENOMEM;
8317                 goto err_clr_master;
8318         }
8319
8320         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8321
8322         return 0;
8323 err_clr_master:
8324         pci_clear_master(pdev);
8325         pci_release_regions(pdev);
8326 err_disable_device:
8327         pci_disable_device(pdev);
8328
8329         return ret;
8330 }
8331
8332 static void hclge_pci_uninit(struct hclge_dev *hdev)
8333 {
8334         struct pci_dev *pdev = hdev->pdev;
8335
8336         pcim_iounmap(pdev, hdev->hw.io_base);
8337         pci_free_irq_vectors(pdev);
8338         pci_clear_master(pdev);
8339         pci_release_mem_regions(pdev);
8340         pci_disable_device(pdev);
8341 }
8342
8343 static void hclge_state_init(struct hclge_dev *hdev)
8344 {
8345         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8346         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8347         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8348         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8349         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8350         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8351 }
8352
8353 static void hclge_state_uninit(struct hclge_dev *hdev)
8354 {
8355         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8356
8357         if (hdev->service_timer.function)
8358                 del_timer_sync(&hdev->service_timer);
8359         if (hdev->reset_timer.function)
8360                 del_timer_sync(&hdev->reset_timer);
8361         if (hdev->service_task.func)
8362                 cancel_work_sync(&hdev->service_task);
8363         if (hdev->rst_service_task.func)
8364                 cancel_work_sync(&hdev->rst_service_task);
8365         if (hdev->mbx_service_task.func)
8366                 cancel_work_sync(&hdev->mbx_service_task);
8367 }
8368
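/* FLR handling: hclge_flr_prepare() requests a function level reset through
 * hclge_reset_event() and then polls flr_state for HNAE3_FLR_DOWN, sleeping
 * HCLGE_FLR_WAIT_MS between checks, so it waits at most about
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS ms (5 seconds) before logging a
 * timeout. hclge_flr_done() sets HNAE3_FLR_DONE afterwards, presumably so the
 * reset service task can complete the FLR sequence.
 */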
8369 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8370 {
8371 #define HCLGE_FLR_WAIT_MS       100
8372 #define HCLGE_FLR_WAIT_CNT      50
8373         struct hclge_dev *hdev = ae_dev->priv;
8374         int cnt = 0;
8375
8376         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8377         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8378         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8379         hclge_reset_event(hdev->pdev, NULL);
8380
8381         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8382                cnt++ < HCLGE_FLR_WAIT_CNT)
8383                 msleep(HCLGE_FLR_WAIT_MS);
8384
8385         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8386                 dev_err(&hdev->pdev->dev,
8387                         "flr wait down timeout: %d\n", cnt);
8388 }
8389
8390 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8391 {
8392         struct hclge_dev *hdev = ae_dev->priv;
8393
8394         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8395 }
8396
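/* hclge_init_ae_dev - one-time PF initialization, reached through the
 * .init_ae_dev hook of hclge_ops. The ordering matters: PCI and command
 * queue setup come first, then firmware capability and configuration
 * queries, MSI/MSI-X and the misc vector, TQP and vport allocation and
 * mapping, MDIO only for copper media, and finally UMV, MAC, TSO/GRO, VLAN,
 * TM scheduler, RSS, manager table, flow director and hardware error
 * interrupt state, plus the service timers and work items. Each error label
 * unwinds only the steps completed before the failure.
 */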
8397 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8398 {
8399         struct pci_dev *pdev = ae_dev->pdev;
8400         struct hclge_dev *hdev;
8401         int ret;
8402
8403         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8404         if (!hdev) {
8405                 ret = -ENOMEM;
8406                 goto out;
8407         }
8408
8409         hdev->pdev = pdev;
8410         hdev->ae_dev = ae_dev;
8411         hdev->reset_type = HNAE3_NONE_RESET;
8412         hdev->reset_level = HNAE3_FUNC_RESET;
8413         ae_dev->priv = hdev;
8414         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8415
8416         mutex_init(&hdev->vport_lock);
8417         mutex_init(&hdev->vport_cfg_mutex);
8418         spin_lock_init(&hdev->fd_rule_lock);
8419
8420         ret = hclge_pci_init(hdev);
8421         if (ret) {
8422                 dev_err(&pdev->dev, "PCI init failed\n");
8423                 goto out;
8424         }
8425
8426         /* Initialize the firmware command queue */
8427         ret = hclge_cmd_queue_init(hdev);
8428         if (ret) {
8429                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8430                 goto err_pci_uninit;
8431         }
8432
8433         /* Initialize the firmware command interface */
8434         ret = hclge_cmd_init(hdev);
8435         if (ret)
8436                 goto err_cmd_uninit;
8437
8438         ret = hclge_get_cap(hdev);
8439         if (ret) {
8440                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8441                         ret);
8442                 goto err_cmd_uninit;
8443         }
8444
8445         ret = hclge_configure(hdev);
8446         if (ret) {
8447                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8448                 goto err_cmd_uninit;
8449         }
8450
8451         ret = hclge_init_msi(hdev);
8452         if (ret) {
8453                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8454                 goto err_cmd_uninit;
8455         }
8456
8457         ret = hclge_misc_irq_init(hdev);
8458         if (ret) {
8459                 dev_err(&pdev->dev,
8460                         "Misc IRQ(vector0) init error, ret = %d.\n",
8461                         ret);
8462                 goto err_msi_uninit;
8463         }
8464
8465         ret = hclge_alloc_tqps(hdev);
8466         if (ret) {
8467                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8468                 goto err_msi_irq_uninit;
8469         }
8470
8471         ret = hclge_alloc_vport(hdev);
8472         if (ret) {
8473                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8474                 goto err_msi_irq_uninit;
8475         }
8476
8477         ret = hclge_map_tqp(hdev);
8478         if (ret) {
8479                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8480                 goto err_msi_irq_uninit;
8481         }
8482
8483         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8484                 ret = hclge_mac_mdio_config(hdev);
8485                 if (ret) {
8486                         dev_err(&hdev->pdev->dev,
8487                                 "mdio config fail ret=%d\n", ret);
8488                         goto err_msi_irq_uninit;
8489                 }
8490         }
8491
8492         ret = hclge_init_umv_space(hdev);
8493         if (ret) {
8494                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8495                 goto err_mdiobus_unreg;
8496         }
8497
8498         ret = hclge_mac_init(hdev);
8499         if (ret) {
8500                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8501                 goto err_mdiobus_unreg;
8502         }
8503
8504         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8505         if (ret) {
8506                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
8507                 goto err_mdiobus_unreg;
8508         }
8509
8510         ret = hclge_config_gro(hdev, true);
8511         if (ret)
8512                 goto err_mdiobus_unreg;
8513
8514         ret = hclge_init_vlan_config(hdev);
8515         if (ret) {
8516                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
8517                 goto err_mdiobus_unreg;
8518         }
8519
8520         ret = hclge_tm_schd_init(hdev);
8521         if (ret) {
8522                 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
8523                 goto err_mdiobus_unreg;
8524         }
8525
8526         hclge_rss_init_cfg(hdev);
8527         ret = hclge_rss_init_hw(hdev);
8528         if (ret) {
8529                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
8530                 goto err_mdiobus_unreg;
8531         }
8532
8533         ret = init_mgr_tbl(hdev);
8534         if (ret) {
8535                 dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
8536                 goto err_mdiobus_unreg;
8537         }
8538
8539         ret = hclge_init_fd_config(hdev);
8540         if (ret) {
8541                 dev_err(&pdev->dev,
8542                         "fd table init fail, ret=%d\n", ret);
8543                 goto err_mdiobus_unreg;
8544         }
8545
8546         ret = hclge_hw_error_set_state(hdev, true);
8547         if (ret) {
8548                 dev_err(&pdev->dev,
8549                         "fail(%d) to enable hw error interrupts\n", ret);
8550                 goto err_mdiobus_unreg;
8551         }
8552
8553         INIT_KFIFO(hdev->mac_tnl_log);
8554
8555         hclge_dcb_ops_set(hdev);
8556
8557         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8558         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8559         INIT_WORK(&hdev->service_task, hclge_service_task);
8560         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8561         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8562
8563         hclge_clear_all_event_cause(hdev);
8564
8565         /* Enable MISC vector(vector0) */
8566         hclge_enable_vector(&hdev->misc_vector, true);
8567
8568         hclge_state_init(hdev);
8569         hdev->last_reset_time = jiffies;
8570
8571         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8572         return 0;
8573
8574 err_mdiobus_unreg:
8575         if (hdev->hw.mac.phydev)
8576                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8577 err_msi_irq_uninit:
8578         hclge_misc_irq_uninit(hdev);
8579 err_msi_uninit:
8580         pci_free_irq_vectors(pdev);
8581 err_cmd_uninit:
8582         hclge_cmd_uninit(hdev);
8583 err_pci_uninit:
8584         pcim_iounmap(pdev, hdev->hw.io_base);
8585         pci_clear_master(pdev);
8586         pci_release_regions(pdev);
8587         pci_disable_device(pdev);
8588 out:
8589         return ret;
8590 }
8591
8592 static void hclge_stats_clear(struct hclge_dev *hdev)
8593 {
8594         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8595 }
8596
8597 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8598 {
8599         struct hclge_vport *vport = hdev->vport;
8600         int i;
8601
8602         for (i = 0; i < hdev->num_alloc_vport; i++) {
8603                 hclge_vport_stop(vport);
8604                 vport++;
8605         }
8606 }
8607
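/* hclge_reset_ae_dev - re-initialize hardware state after a reset. Unlike
 * hclge_init_ae_dev() it does not reallocate software resources (MSI
 * vectors, TQPs, vports); it clears the cached statistics and VLAN table and
 * then re-runs only the hardware programming steps (command init, TQP
 * mapping, UMV space, MAC, TSO/GRO, VLAN, TM, RSS, flow director), finishing
 * by re-enabling the hardware error interrupts that a core/global reset
 * disables and marking every vport as stopped.
 */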
8608 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8609 {
8610         struct hclge_dev *hdev = ae_dev->priv;
8611         struct pci_dev *pdev = ae_dev->pdev;
8612         int ret;
8613
8614         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8615
8616         hclge_stats_clear(hdev);
8617         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8618
8619         ret = hclge_cmd_init(hdev);
8620         if (ret) {
8621                 dev_err(&pdev->dev, "Cmd init failed, ret = %d\n", ret);
8622                 return ret;
8623         }
8624
8625         ret = hclge_map_tqp(hdev);
8626         if (ret) {
8627                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8628                 return ret;
8629         }
8630
8631         hclge_reset_umv_space(hdev);
8632
8633         ret = hclge_mac_init(hdev);
8634         if (ret) {
8635                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8636                 return ret;
8637         }
8638
8639         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8640         if (ret) {
8641                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
8642                 return ret;
8643         }
8644
8645         ret = hclge_config_gro(hdev, true);
8646         if (ret)
8647                 return ret;
8648
8649         ret = hclge_init_vlan_config(hdev);
8650         if (ret) {
8651                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
8652                 return ret;
8653         }
8654
8655         ret = hclge_tm_init_hw(hdev, true);
8656         if (ret) {
8657                 dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
8658                 return ret;
8659         }
8660
8661         ret = hclge_rss_init_hw(hdev);
8662         if (ret) {
8663                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
8664                 return ret;
8665         }
8666
8667         ret = hclge_init_fd_config(hdev);
8668         if (ret) {
8669                 dev_err(&pdev->dev,
8670                         "fd table init fail, ret=%d\n", ret);
8671                 return ret;
8672         }
8673
8674         /* Re-enable the hw error interrupts because
8675          * the interrupts get disabled on core/global reset.
8676          */
8677         ret = hclge_hw_error_set_state(hdev, true);
8678         if (ret) {
8679                 dev_err(&pdev->dev,
8680                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8681                 return ret;
8682         }
8683
8684         hclge_reset_vport_state(hdev);
8685
8686         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8687                  HCLGE_DRIVER_NAME);
8688
8689         return 0;
8690 }
8691
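/* hclge_uninit_ae_dev - teardown that roughly mirrors hclge_init_ae_dev() in
 * reverse: stop the timers and work items, unregister the MDIO bus if a PHY
 * was attached, release UMV space, mask and synchronize the misc vector,
 * disable MAC tunnel and hardware error interrupts, then tear down the
 * command queue, the misc IRQ, the PCI resources and the vport MAC/VLAN
 * tables.
 */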
8692 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8693 {
8694         struct hclge_dev *hdev = ae_dev->priv;
8695         struct hclge_mac *mac = &hdev->hw.mac;
8696
8697         hclge_state_uninit(hdev);
8698
8699         if (mac->phydev)
8700                 mdiobus_unregister(mac->mdio_bus);
8701
8702         hclge_uninit_umv_space(hdev);
8703
8704         /* Disable MISC vector(vector0) */
8705         hclge_enable_vector(&hdev->misc_vector, false);
8706         synchronize_irq(hdev->misc_vector.vector_irq);
8707
8708         hclge_config_mac_tnl_int(hdev, false);
8709         hclge_hw_error_set_state(hdev, false);
8710         hclge_cmd_uninit(hdev);
8711         hclge_misc_irq_uninit(hdev);
8712         hclge_pci_uninit(hdev);
8713         mutex_destroy(&hdev->vport_lock);
8714         hclge_uninit_vport_mac_table(hdev);
8715         hclge_uninit_vport_vlan_table(hdev);
8716         mutex_destroy(&hdev->vport_cfg_mutex);
8717         ae_dev->priv = NULL;
8718 }
8719
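/* Channel accounting for ethtool: the maximum number of combined channels
 * for a handle is bounded both by the hardware RSS capability and by the
 * TQPs allocated to the vport split across its TCs, i.e.
 * min(rss_size_max, alloc_tqps / num_tc).
 */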
8720 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8721 {
8722         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8723         struct hclge_vport *vport = hclge_get_vport(handle);
8724         struct hclge_dev *hdev = vport->back;
8725
8726         return min_t(u32, hdev->rss_size_max,
8727                      vport->alloc_tqps / kinfo->num_tc);
8728 }
8729
8730 static void hclge_get_channels(struct hnae3_handle *handle,
8731                                struct ethtool_channels *ch)
8732 {
8733         ch->max_combined = hclge_get_max_channels(handle);
8734         ch->other_count = 1;
8735         ch->max_other = 1;
8736         ch->combined_count = handle->kinfo.rss_size;
8737 }
8738
8739 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8740                                         u16 *alloc_tqps, u16 *max_rss_size)
8741 {
8742         struct hclge_vport *vport = hclge_get_vport(handle);
8743         struct hclge_dev *hdev = vport->back;
8744
8745         *alloc_tqps = vport->alloc_tqps;
8746         *max_rss_size = hdev->rss_size_max;
8747 }
8748
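/* hclge_set_channels - resize the queue set of a handle, typically reached
 * via "ethtool -L <dev> combined N". It records the requested rss_size, lets
 * the TM code recompute the vport mapping, reprograms the RSS TC mode with a
 * rounded-up power-of-two size per enabled TC and, unless userspace already
 * configured its own RSS table (rxfh_configured), rebuilds the indirection
 * table as a simple round-robin over the new rss_size.
 */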
8749 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8750                               bool rxfh_configured)
8751 {
8752         struct hclge_vport *vport = hclge_get_vport(handle);
8753         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8754         struct hclge_dev *hdev = vport->back;
8755         int cur_rss_size = kinfo->rss_size;
8756         int cur_tqps = kinfo->num_tqps;
8757         u16 tc_offset[HCLGE_MAX_TC_NUM];
8758         u16 tc_valid[HCLGE_MAX_TC_NUM];
8759         u16 tc_size[HCLGE_MAX_TC_NUM];
8760         u16 roundup_size;
8761         u32 *rss_indir;
8762         int ret, i;
8763
8764         kinfo->req_rss_size = new_tqps_num;
8765
8766         ret = hclge_tm_vport_map_update(hdev);
8767         if (ret) {
8768                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
8769                 return ret;
8770         }
8771
8772         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8773         roundup_size = ilog2(roundup_size);
8774         /* Set the RSS TC mode according to the new RSS size */
8775         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8776                 tc_valid[i] = 0;
8777
8778                 if (!(hdev->hw_tc_map & BIT(i)))
8779                         continue;
8780
8781                 tc_valid[i] = 1;
8782                 tc_size[i] = roundup_size;
8783                 tc_offset[i] = kinfo->rss_size * i;
8784         }
8785         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8786         if (ret)
8787                 return ret;
8788
8789         /* RSS indirection table has been configured by user */
8790         if (rxfh_configured)
8791                 goto out;
8792
8793         /* Reinitialize the RSS indirection table according to the new RSS size */
8794         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8795         if (!rss_indir)
8796                 return -ENOMEM;
8797
8798         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8799                 rss_indir[i] = i % kinfo->rss_size;
8800
8801         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8802         if (ret)
8803                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8804                         ret);
8805
8806         kfree(rss_indir);
8807
8808 out:
8809         if (!ret)
8810                 dev_info(&hdev->pdev->dev,
8811                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8812                          cur_rss_size, kinfo->rss_size,
8813                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8814
8815         return ret;
8816 }
8817
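/* Register dump support: firmware reports how many 32-bit and 64-bit
 * registers it can return through the HCLGE_OPC_QUERY_REG_NUM command; the
 * two counts come back in desc.data[0] and desc.data[1].
 */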
8818 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8819                               u32 *regs_num_64_bit)
8820 {
8821         struct hclge_desc desc;
8822         u32 total_num;
8823         int ret;
8824
8825         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8826         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8827         if (ret) {
8828                 dev_err(&hdev->pdev->dev,
8829                         "Query register number cmd failed, ret = %d.\n", ret);
8830                 return ret;
8831         }
8832
8833         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8834         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8835
8836         total_num = *regs_num_32_bit + *regs_num_64_bit;
8837         if (!total_num)
8838                 return -EINVAL;
8839
8840         return 0;
8841 }
8842
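/* A command descriptor is eight 32-bit words long, but the first descriptor
 * of the query spends two of them on the command header, leaving
 * HCLGE_32_BIT_REG_RTN_DATANUM - 2 data words; the remaining descriptors are
 * consumed as raw data. Hence DIV_ROUND_UP(regs_num + 2, 8) descriptors are
 * needed and the copy loop reads only six words from descriptor 0.
 */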
8843 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8844                                  void *data)
8845 {
8846 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8847
8848         struct hclge_desc *desc;
8849         u32 *reg_val = data;
8850         __le32 *desc_data;
8851         int cmd_num;
8852         int i, k, n;
8853         int ret;
8854
8855         if (regs_num == 0)
8856                 return 0;
8857
8858         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8859         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8860         if (!desc)
8861                 return -ENOMEM;
8862
8863         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8864         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8865         if (ret) {
8866                 dev_err(&hdev->pdev->dev,
8867                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8868                 kfree(desc);
8869                 return ret;
8870         }
8871
8872         for (i = 0; i < cmd_num; i++) {
8873                 if (i == 0) {
8874                         desc_data = (__le32 *)(&desc[i].data[0]);
8875                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8876                 } else {
8877                         desc_data = (__le32 *)(&desc[i]);
8878                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8879                 }
8880                 for (k = 0; k < n; k++) {
8881                         *reg_val++ = le32_to_cpu(*desc_data++);
8882
8883                         regs_num--;
8884                         if (!regs_num)
8885                                 break;
8886                 }
8887         }
8888
8889         kfree(desc);
8890         return 0;
8891 }
8892
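/* Same scheme for the 64-bit registers: a descriptor holds four 64-bit
 * words, and the first one gives up a word to the command header, hence
 * DIV_ROUND_UP(regs_num + 1, 4) descriptors and HCLGE_64_BIT_REG_RTN_DATANUM
 * - 1 words copied from descriptor 0.
 */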
8893 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8894                                  void *data)
8895 {
8896 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8897
8898         struct hclge_desc *desc;
8899         u64 *reg_val = data;
8900         __le64 *desc_data;
8901         int cmd_num;
8902         int i, k, n;
8903         int ret;
8904
8905         if (regs_num == 0)
8906                 return 0;
8907
8908         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8909         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8910         if (!desc)
8911                 return -ENOMEM;
8912
8913         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8914         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8915         if (ret) {
8916                 dev_err(&hdev->pdev->dev,
8917                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8918                 kfree(desc);
8919                 return ret;
8920         }
8921
8922         for (i = 0; i < cmd_num; i++) {
8923                 if (i == 0) {
8924                         desc_data = (__le64 *)(&desc[i].data[0]);
8925                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8926                 } else {
8927                         desc_data = (__le64 *)(&desc[i]);
8928                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8929                 }
8930                 for (k = 0; k < n; k++) {
8931                         *reg_val++ = le64_to_cpu(*desc_data++);
8932
8933                         regs_num--;
8934                         if (!regs_num)
8935                                 break;
8936                 }
8937         }
8938
8939         kfree(desc);
8940         return 0;
8941 }
8942
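/* The register dump handed to ethtool is laid out in lines of
 * REG_NUM_PER_LINE u32 words; each directly read block is padded with
 * SEPARATOR_VALUE words so blocks stay line aligned. hclge_get_regs_len()
 * sizes the buffer accordingly: one line-rounded block each for the cmdq and
 * common registers, one per TQP for the ring registers, one per used MSI-X
 * vector (excluding, presumably, the misc vector) for the TQP interrupt
 * registers, plus the firmware-provided 32-bit and 64-bit register arrays.
 */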
8943 #define MAX_SEPARATE_NUM        4
8944 #define SEPARATOR_VALUE         0xFFFFFFFF
8945 #define REG_NUM_PER_LINE        4
8946 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8947
8948 static int hclge_get_regs_len(struct hnae3_handle *handle)
8949 {
8950         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8951         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8952         struct hclge_vport *vport = hclge_get_vport(handle);
8953         struct hclge_dev *hdev = vport->back;
8954         u32 regs_num_32_bit, regs_num_64_bit;
8955         int ret;
8956
8957         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8958         if (ret) {
8959                 dev_err(&hdev->pdev->dev,
8960                         "Get register number failed, ret = %d.\n", ret);
8961                 return -EOPNOTSUPP;
8962         }
8963
8964         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8965         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8966         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8967         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8968
8969         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8970                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8971                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8972 }
8973
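/* hclge_get_regs - fill the register dump buffer, typically for
 * "ethtool -d <dev>". Direct reads come first from the PF's own register
 * space: the per-ring registers are strided by 0x200 per TQP and the
 * per-vector TQP interrupt registers by 4 bytes per vector, matching the
 * layout sized in hclge_get_regs_len(). The 32-bit and 64-bit register
 * arrays queried from firmware are appended after the direct reads.
 */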
8974 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8975                            void *data)
8976 {
8977         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8978         struct hclge_vport *vport = hclge_get_vport(handle);
8979         struct hclge_dev *hdev = vport->back;
8980         u32 regs_num_32_bit, regs_num_64_bit;
8981         int i, j, reg_um, separator_num;
8982         u32 *reg = data;
8983         int ret;
8984
8985         *version = hdev->fw_version;
8986
8987         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8988         if (ret) {
8989                 dev_err(&hdev->pdev->dev,
8990                         "Get register number failed, ret = %d.\n", ret);
8991                 return;
8992         }
8993
8994         /* fetching per-PF register values from PF PCIe register space */
8995         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8996         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8997         for (i = 0; i < reg_um; i++)
8998                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8999         for (i = 0; i < separator_num; i++)
9000                 *reg++ = SEPARATOR_VALUE;
9001
9002         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9003         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9004         for (i = 0; i < reg_um; i++)
9005                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9006         for (i = 0; i < separator_num; i++)
9007                 *reg++ = SEPARATOR_VALUE;
9008
9009         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9010         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9011         for (j = 0; j < kinfo->num_tqps; j++) {
9012                 for (i = 0; i < reg_um; i++)
9013                         *reg++ = hclge_read_dev(&hdev->hw,
9014                                                 ring_reg_addr_list[i] +
9015                                                 0x200 * j);
9016                 for (i = 0; i < separator_num; i++)
9017                         *reg++ = SEPARATOR_VALUE;
9018         }
9019
9020         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9021         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9022         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9023                 for (i = 0; i < reg_um; i++)
9024                         *reg++ = hclge_read_dev(&hdev->hw,
9025                                                 tqp_intr_reg_addr_list[i] +
9026                                                 4 * j);
9027                 for (i = 0; i < separator_num; i++)
9028                         *reg++ = SEPARATOR_VALUE;
9029         }
9030
9031         /* fetching PF common register values from firmware */
9032         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9033         if (ret) {
9034                 dev_err(&hdev->pdev->dev,
9035                         "Get 32 bit register failed, ret = %d.\n", ret);
9036                 return;
9037         }
9038
9039         reg += regs_num_32_bit;
9040         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9041         if (ret)
9042                 dev_err(&hdev->pdev->dev,
9043                         "Get 64 bit register failed, ret = %d.\n", ret);
9044 }
9045
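/* Locate LED control: hclge_set_led_status() programs the LED state with the
 * HCLGE_OPC_LED_STATUS_CFG command, and hclge_set_led_id() maps the ethtool
 * identify states onto it, so the port locate LED can be driven from
 * userspace (typically "ethtool -p <dev>").
 */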
9046 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9047 {
9048         struct hclge_set_led_state_cmd *req;
9049         struct hclge_desc desc;
9050         int ret;
9051
9052         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9053
9054         req = (struct hclge_set_led_state_cmd *)desc.data;
9055         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9056                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9057
9058         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9059         if (ret)
9060                 dev_err(&hdev->pdev->dev,
9061                         "Send set led state cmd error, ret = %d\n", ret);
9062
9063         return ret;
9064 }
9065
9066 enum hclge_led_status {
9067         HCLGE_LED_OFF,
9068         HCLGE_LED_ON,
9069         HCLGE_LED_NO_CHANGE = 0xFF,
9070 };
9071
9072 static int hclge_set_led_id(struct hnae3_handle *handle,
9073                             enum ethtool_phys_id_state status)
9074 {
9075         struct hclge_vport *vport = hclge_get_vport(handle);
9076         struct hclge_dev *hdev = vport->back;
9077
9078         switch (status) {
9079         case ETHTOOL_ID_ACTIVE:
9080                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9081         case ETHTOOL_ID_INACTIVE:
9082                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9083         default:
9084                 return -EINVAL;
9085         }
9086 }
9087
9088 static void hclge_get_link_mode(struct hnae3_handle *handle,
9089                                 unsigned long *supported,
9090                                 unsigned long *advertising)
9091 {
9092         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9093         struct hclge_vport *vport = hclge_get_vport(handle);
9094         struct hclge_dev *hdev = vport->back;
9095         unsigned int idx = 0;
9096
9097         for (; idx < size; idx++) {
9098                 supported[idx] = hdev->hw.mac.supported[idx];
9099                 advertising[idx] = hdev->hw.mac.advertising[idx];
9100         }
9101 }
9102
9103 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9104 {
9105         struct hclge_vport *vport = hclge_get_vport(handle);
9106         struct hclge_dev *hdev = vport->back;
9107
9108         return hclge_config_gro(hdev, enable);
9109 }
9110
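/* hclge_ops is the ae_ops table that plugs this PF driver into the hnae3
 * framework; the hns3 client driver reaches the MAC, RSS, VLAN, flow
 * director, reset and debug functionality above through these callbacks
 * rather than by touching hclge internals directly.
 */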
9111 static const struct hnae3_ae_ops hclge_ops = {
9112         .init_ae_dev = hclge_init_ae_dev,
9113         .uninit_ae_dev = hclge_uninit_ae_dev,
9114         .flr_prepare = hclge_flr_prepare,
9115         .flr_done = hclge_flr_done,
9116         .init_client_instance = hclge_init_client_instance,
9117         .uninit_client_instance = hclge_uninit_client_instance,
9118         .map_ring_to_vector = hclge_map_ring_to_vector,
9119         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9120         .get_vector = hclge_get_vector,
9121         .put_vector = hclge_put_vector,
9122         .set_promisc_mode = hclge_set_promisc_mode,
9123         .set_loopback = hclge_set_loopback,
9124         .start = hclge_ae_start,
9125         .stop = hclge_ae_stop,
9126         .client_start = hclge_client_start,
9127         .client_stop = hclge_client_stop,
9128         .get_status = hclge_get_status,
9129         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9130         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9131         .get_media_type = hclge_get_media_type,
9132         .check_port_speed = hclge_check_port_speed,
9133         .get_fec = hclge_get_fec,
9134         .set_fec = hclge_set_fec,
9135         .get_rss_key_size = hclge_get_rss_key_size,
9136         .get_rss_indir_size = hclge_get_rss_indir_size,
9137         .get_rss = hclge_get_rss,
9138         .set_rss = hclge_set_rss,
9139         .set_rss_tuple = hclge_set_rss_tuple,
9140         .get_rss_tuple = hclge_get_rss_tuple,
9141         .get_tc_size = hclge_get_tc_size,
9142         .get_mac_addr = hclge_get_mac_addr,
9143         .set_mac_addr = hclge_set_mac_addr,
9144         .do_ioctl = hclge_do_ioctl,
9145         .add_uc_addr = hclge_add_uc_addr,
9146         .rm_uc_addr = hclge_rm_uc_addr,
9147         .add_mc_addr = hclge_add_mc_addr,
9148         .rm_mc_addr = hclge_rm_mc_addr,
9149         .set_autoneg = hclge_set_autoneg,
9150         .get_autoneg = hclge_get_autoneg,
9151         .restart_autoneg = hclge_restart_autoneg,
9152         .get_pauseparam = hclge_get_pauseparam,
9153         .set_pauseparam = hclge_set_pauseparam,
9154         .set_mtu = hclge_set_mtu,
9155         .reset_queue = hclge_reset_tqp,
9156         .get_stats = hclge_get_stats,
9157         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9158         .update_stats = hclge_update_stats,
9159         .get_strings = hclge_get_strings,
9160         .get_sset_count = hclge_get_sset_count,
9161         .get_fw_version = hclge_get_fw_version,
9162         .get_mdix_mode = hclge_get_mdix_mode,
9163         .enable_vlan_filter = hclge_enable_vlan_filter,
9164         .set_vlan_filter = hclge_set_vlan_filter,
9165         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9166         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9167         .reset_event = hclge_reset_event,
9168         .set_default_reset_request = hclge_set_def_reset_request,
9169         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9170         .set_channels = hclge_set_channels,
9171         .get_channels = hclge_get_channels,
9172         .get_regs_len = hclge_get_regs_len,
9173         .get_regs = hclge_get_regs,
9174         .set_led_id = hclge_set_led_id,
9175         .get_link_mode = hclge_get_link_mode,
9176         .add_fd_entry = hclge_add_fd_entry,
9177         .del_fd_entry = hclge_del_fd_entry,
9178         .del_all_fd_entries = hclge_del_all_fd_entries,
9179         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9180         .get_fd_rule_info = hclge_get_fd_rule_info,
9181         .get_fd_all_rules = hclge_get_all_rules,
9182         .restore_fd_rules = hclge_restore_fd_entries,
9183         .enable_fd = hclge_enable_fd,
9184         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9185         .dbg_run_cmd = hclge_dbg_run_cmd,
9186         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9187         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9188         .ae_dev_resetting = hclge_ae_dev_resetting,
9189         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9190         .set_gro_en = hclge_gro_en,
9191         .get_global_queue_id = hclge_covert_handle_qid_global,
9192         .set_timer_task = hclge_set_timer_task,
9193         .mac_connect_phy = hclge_mac_connect_phy,
9194         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9195 };
9196
9197 static struct hnae3_ae_algo ae_algo = {
9198         .ops = &hclge_ops,
9199         .pdev_id_table = ae_algo_pci_tbl,
9200 };
9201
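/* Module init/exit only register and unregister the algorithm with the hnae3
 * framework; device setup happens later, when hnae3 matches a device from
 * ae_algo_pci_tbl and invokes .init_ae_dev.
 */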
9202 static int hclge_init(void)
9203 {
9204         pr_info("%s is initializing\n", HCLGE_NAME);
9205
9206         hnae3_register_ae_algo(&ae_algo);
9207
9208         return 0;
9209 }
9210
9211 static void hclge_exit(void)
9212 {
9213         hnae3_unregister_ae_algo(&ae_algo);
9214 }
9215 module_init(hclge_init);
9216 module_exit(hclge_exit);
9217
9218 MODULE_LICENSE("GPL");
9219 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9220 MODULE_DESCRIPTION("HCLGE Driver");
9221 MODULE_VERSION(HCLGE_MOD_VERSION);