// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
                .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
                .i_port_bitmap = 0x1,
        },
};

static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

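/* hclge_mac_update_stats_defective: query MAC statistics with the
 * fixed-size HCLGE_OPC_STATS_MAC command (21 descriptors) and
 * accumulate the returned counters into hdev->hw_stats.mac_stats.
 * Used when the firmware cannot report the MAC register number.
 */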
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

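/* hclge_mac_update_stats_complete: query all MAC statistics with the
 * HCLGE_OPC_STATS_MAC_ALL command, using a descriptor count previously
 * obtained from the firmware, and accumulate the counters into
 * hdev->hw_stats.mac_stats.
 */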
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

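/* hclge_mac_query_reg_num: ask the firmware how many MAC statistics
 * registers exist and convert that count into the number of command
 * descriptors needed to read them all (the first descriptor holds a
 * different number of stats than the following ones).
 */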
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

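/* hclge_tqps_update_stats: read the RX and TX packet counters of every
 * task queue pair owned by this handle and accumulate them into the
 * per-TQP software statistics.
 */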
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each tqp has TX & RX two queues */
        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

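/* hclge_comm_get_stats: copy the statistics described by @strs out of
 * @comm_stats into the ethtool data buffer and return the position
 * right after the copied values.
 */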
static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only GE mode is supported
         * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
         * phy: only supported when a phy device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->pdev->revision >= 0x21 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
                                     u64 *rx_cnt)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
        *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check if pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        ret = hclge_parse_func_status(hdev, req);

        return ret;
}

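/* hclge_query_pf_resource: query the TQP, buffer and MSI-X resources
 * assigned to this PF and record them in @hdev.
 */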
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
                hdev->num_msi = hdev->num_roce_msi +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }

        return 0;
}

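/* hclge_parse_speed: translate the speed code reported by the firmware
 * into an HCLGE_MAC_SPEED_* value.
 */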
static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

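/* hclge_check_port_speed: return 0 if @speed is advertised in the
 * port's speed ability bitmap, -EINVAL otherwise.
 */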
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
}

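/* hclge_convert_setting_fec: update the supported FEC link modes and
 * the FEC ability bitmap according to the current MAC speed.
 */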
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u8 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to support all speed for GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        unsigned int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_RSS_SIZE_M,
                                            HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param_cmd *req;
        unsigned int i;
        int ret;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
                u32 offset = 0;

                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
                hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                                HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* the length sent to hardware must be in units of 4 bytes */
                hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
                                HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);
        }

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
                return ret;
        }

        hclge_parse_cfg(hcfg, desc);

        return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_query_function_status(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query function status error %d.\n", ret);
                return ret;
        }

        /* get pf resource */
        ret = hclge_query_pf_resource(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

        return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC       64
#define HCLGE_MIN_RX_DESC       64

        if (!is_kdump_kernel())
                return;

        dev_info(&hdev->pdev->dev,
                 "Running kdump kernel. Using minimal resources\n");

        /* the minimal number of queue pairs equals the number of vports */
        hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
        hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
        hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

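/* hclge_configure: fetch the static configuration from the firmware
 * and initialize the corresponding fields of @hdev (queue sizes, MAC
 * address, TC/PFC settings and so on).
 */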
static int hclge_configure(struct hclge_dev *hdev)
{
        struct hclge_cfg cfg;
        unsigned int i;
        int ret;

        ret = hclge_get_cfg(hdev, &cfg);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
                return ret;
        }

        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
        hdev->rss_size_max = cfg.rss_size_max;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
        hdev->hw.mac.phy_addr = cfg.phy_addr;
        hdev->num_tx_desc = cfg.tqp_desc_num;
        hdev->num_rx_desc = cfg.tqp_desc_num;
        hdev->tm_info.num_pg = 1;
        hdev->tc_max = cfg.tc_num;
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;

        if (hnae3_dev_fd_supported(hdev)) {
                hdev->fd_en = true;
                hdev->fd_active_type = HCLGE_FD_RULE_NONE;
        }

        ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
        if (ret) {
                dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
                return ret;
        }

        hclge_parse_link_mode(hdev, cfg.speed_ability);

        if (hdev->tc_max > HNAE3_MAX_TC || hdev->tc_max < 1) {
                dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
                         hdev->tc_max);
                hdev->tc_max = 1;
        }

        /* Dev does not support DCB */
        if (!hnae3_dev_dcb_supported(hdev)) {
                hdev->tc_max = 1;
                hdev->pfc_max = 0;
        } else {
                hdev->pfc_max = hdev->tc_max;
        }

        hdev->tm_info.num_tc = 1;

        /* Discontiguous tc maps are currently not supported */
        for (i = 0; i < hdev->tm_info.num_tc; i++)
                hnae3_set_bit(hdev->hw_tc_map, i, 1);

        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

        hclge_init_kdump_kernel_config(hdev);

        return ret;
}

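/* hclge_config_tso: program the minimum and maximum TSO MSS values
 * into the hardware.
 */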
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
                            unsigned int tso_mss_max)
{
        struct hclge_cfg_tso_status_cmd *req;
        struct hclge_desc desc;
        u16 tso_mss;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hclge_cfg_tso_status_cmd *)desc.data;

        tso_mss = 0;
        hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                        HCLGE_TSO_MSS_MIN_S, tso_mss_min);
        req->tso_mss_min = cpu_to_le16(tso_mss);

        tso_mss = 0;
        hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                        HCLGE_TSO_MSS_MIN_S, tso_mss_max);
        req->tso_mss_max = cpu_to_le16(tso_mss);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;
        int ret;

        if (!hnae3_dev_gro_supported(hdev))
                return 0;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;

        req->gro_en = cpu_to_le16(en ? 1 : 0);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "GRO hardware config cmd failed, ret = %d\n", ret);

        return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
        struct hclge_tqp *tqp;
        int i;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclge_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algo;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.tx_desc_num = hdev->num_tx_desc;
                tqp->q.rx_desc_num = hdev->num_rx_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
                        i * HCLGE_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

1351 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1352                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1353 {
1354         struct hclge_tqp_map_cmd *req;
1355         struct hclge_desc desc;
1356         int ret;
1357
1358         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1359
1360         req = (struct hclge_tqp_map_cmd *)desc.data;
1361         req->tqp_id = cpu_to_le16(tqp_pid);
1362         req->tqp_vf = func_id;
1363         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1364         if (!is_pf)
1365                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1366         req->tqp_vid = cpu_to_le16(tqp_vid);
1367
1368         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1369         if (ret)
1370                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1371
1372         return ret;
1373 }
1374
1375 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1376 {
1377         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1378         struct hclge_dev *hdev = vport->back;
1379         int i, alloced;
1380
1381         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1382              alloced < num_tqps; i++) {
1383                 if (!hdev->htqp[i].alloced) {
1384                         hdev->htqp[i].q.handle = &vport->nic;
1385                         hdev->htqp[i].q.tqp_index = alloced;
1386                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1387                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1388                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1389                         hdev->htqp[i].alloced = true;
1390                         alloced++;
1391                 }
1392         }
1393         vport->alloc_tqps = alloced;
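             /* rss_size is limited by both the hardware maximum and the
              * number of TQPs available per TC
              */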
1394         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1395                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1396
1397         return 0;
1398 }
1399
1400 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1401                             u16 num_tx_desc, u16 num_rx_desc)
1403 {
1404         struct hnae3_handle *nic = &vport->nic;
1405         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1406         struct hclge_dev *hdev = vport->back;
1407         int ret;
1408
1409         kinfo->num_tx_desc = num_tx_desc;
1410         kinfo->num_rx_desc = num_rx_desc;
1411
1412         kinfo->rx_buf_len = hdev->rx_buf_len;
1413
1414         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1415                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1416         if (!kinfo->tqp)
1417                 return -ENOMEM;
1418
1419         ret = hclge_assign_tqp(vport, num_tqps);
1420         if (ret)
1421                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1422
1423         return ret;
1424 }
1425
1426 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1427                                   struct hclge_vport *vport)
1428 {
1429         struct hnae3_handle *nic = &vport->nic;
1430         struct hnae3_knic_private_info *kinfo;
1431         u16 i;
1432
1433         kinfo = &nic->kinfo;
1434         for (i = 0; i < vport->alloc_tqps; i++) {
1435                 struct hclge_tqp *q =
1436                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1437                 bool is_pf;
1438                 int ret;
1439
1440                 is_pf = !(vport->vport_id);
1441                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1442                                              i, is_pf);
1443                 if (ret)
1444                         return ret;
1445         }
1446
1447         return 0;
1448 }
1449
1450 static int hclge_map_tqp(struct hclge_dev *hdev)
1451 {
1452         struct hclge_vport *vport = hdev->vport;
1453         u16 i, num_vport;
1454
1455         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1456         for (i = 0; i < num_vport; i++) {
1457                 int ret;
1458
1459                 ret = hclge_map_tqp_to_vport(hdev, vport);
1460                 if (ret)
1461                         return ret;
1462
1463                 vport++;
1464         }
1465
1466         return 0;
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         ret = hclge_knic_setup(vport, num_tqps,
1480                                hdev->num_tx_desc, hdev->num_rx_desc);
1481         if (ret)
1482                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1483
1484         return ret;
1485 }
1486
1487 static int hclge_alloc_vport(struct hclge_dev *hdev)
1488 {
1489         struct pci_dev *pdev = hdev->pdev;
1490         struct hclge_vport *vport;
1491         u32 tqp_main_vport;
1492         u32 tqp_per_vport;
1493         int num_vport, i;
1494         int ret;
1495
1496         /* We need to alloc a vport for the main NIC of the PF */
1497         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1498
1499         if (hdev->num_tqps < num_vport) {
1500                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1501                         hdev->num_tqps, num_vport);
1502                 return -EINVAL;
1503         }
1504
1505         /* Alloc the same number of TQPs for every vport */
1506         tqp_per_vport = hdev->num_tqps / num_vport;
1507         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1508
1509         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1510                              GFP_KERNEL);
1511         if (!vport)
1512                 return -ENOMEM;
1513
1514         hdev->vport = vport;
1515         hdev->num_alloc_vport = num_vport;
1516
1517         if (IS_ENABLED(CONFIG_PCI_IOV))
1518                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1519
1520         for (i = 0; i < num_vport; i++) {
1521                 vport->back = hdev;
1522                 vport->vport_id = i;
1523                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1524                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1525                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1526                 INIT_LIST_HEAD(&vport->vlan_list);
1527                 INIT_LIST_HEAD(&vport->uc_mac_list);
1528                 INIT_LIST_HEAD(&vport->mc_mac_list);
1529
1530                 if (i == 0)
1531                         ret = hclge_vport_setup(vport, tqp_main_vport);
1532                 else
1533                         ret = hclge_vport_setup(vport, tqp_per_vport);
1534                 if (ret) {
1535                         dev_err(&pdev->dev,
1536                                 "vport setup failed for vport %d, %d\n",
1537                                 i, ret);
1538                         return ret;
1539                 }
1540
1541                 vport++;
1542         }
1543
1544         return 0;
1545 }
1546
1547 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1548                                     struct hclge_pkt_buf_alloc *buf_alloc)
1549 {
1550 /* the unit of the TX buffer size is 128 bytes */
1551 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1552 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1553         struct hclge_tx_buff_alloc_cmd *req;
1554         struct hclge_desc desc;
1555         int ret;
1556         u8 i;
1557
1558         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1559
1560         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1561         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1562                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1563
1564                 req->tx_pkt_buff[i] =
1565                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1566                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1567         }
1568
1569         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1570         if (ret)
1571                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1572                         ret);
1573
1574         return ret;
1575 }
1576
1577 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1578                                  struct hclge_pkt_buf_alloc *buf_alloc)
1579 {
1580         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1581
1582         if (ret)
1583                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1584
1585         return ret;
1586 }
1587
1588 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1589 {
1590         unsigned int i;
1591         u32 cnt = 0;
1592
1593         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1594                 if (hdev->hw_tc_map & BIT(i))
1595                         cnt++;
1596         return cnt;
1597 }
1598
1599 /* Get the number of PFC-enabled TCs that have a private buffer */
1600 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1601                                   struct hclge_pkt_buf_alloc *buf_alloc)
1602 {
1603         struct hclge_priv_buf *priv;
1604         unsigned int i;
1605         int cnt = 0;
1606
1607         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1608                 priv = &buf_alloc->priv_buf[i];
1609                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1610                     priv->enable)
1611                         cnt++;
1612         }
1613
1614         return cnt;
1615 }
1616
1617 /* Get the number of PFC-disabled TCs that have a private buffer */
1618 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1619                                      struct hclge_pkt_buf_alloc *buf_alloc)
1620 {
1621         struct hclge_priv_buf *priv;
1622         unsigned int i;
1623         int cnt = 0;
1624
1625         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1626                 priv = &buf_alloc->priv_buf[i];
1627                 if (hdev->hw_tc_map & BIT(i) &&
1628                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1629                     priv->enable)
1630                         cnt++;
1631         }
1632
1633         return cnt;
1634 }
1635
1636 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1637 {
1638         struct hclge_priv_buf *priv;
1639         u32 rx_priv = 0;
1640         int i;
1641
1642         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1643                 priv = &buf_alloc->priv_buf[i];
1644                 if (priv->enable)
1645                         rx_priv += priv->buf_size;
1646         }
1647         return rx_priv;
1648 }
1649
1650 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1651 {
1652         u32 i, total_tx_size = 0;
1653
1654         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1655                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1656
1657         return total_tx_size;
1658 }
1659
1660 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1661                                 struct hclge_pkt_buf_alloc *buf_alloc,
1662                                 u32 rx_all)
1663 {
1664         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1665         u32 tc_num = hclge_get_tc_num(hdev);
1666         u32 shared_buf, aligned_mps;
1667         u32 rx_priv;
1668         int i;
1669
1670         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1671
1672         if (hnae3_dev_dcb_supported(hdev))
1673                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1674                                         hdev->dv_buf_size;
1675         else
1676                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1677                                         + hdev->dv_buf_size;
1678
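             /* besides the minimum above, the shared buffer must also be
              * able to hold one MPS per enabled TC plus one extra MPS
              */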
1679         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1680         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1681                              HCLGE_BUF_SIZE_UNIT);
1682
1683         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1684         if (rx_all < rx_priv + shared_std)
1685                 return false;
1686
1687         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1688         buf_alloc->s_buf.buf_size = shared_buf;
1689         if (hnae3_dev_dcb_supported(hdev)) {
1690                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1691                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1692                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1693                                   HCLGE_BUF_SIZE_UNIT);
1694         } else {
1695                 buf_alloc->s_buf.self.high = aligned_mps +
1696                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1697                 buf_alloc->s_buf.self.low = aligned_mps;
1698         }
1699
1700         if (hnae3_dev_dcb_supported(hdev)) {
1701                 hi_thrd = shared_buf - hdev->dv_buf_size;
1702
1703                 if (tc_num <= NEED_RESERVE_TC_NUM)
1704                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1705                                         / BUF_MAX_PERCENT;
1706
1707                 if (tc_num)
1708                         hi_thrd = hi_thrd / tc_num;
1709
1710                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1711                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1712                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1713         } else {
1714                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1715                 lo_thrd = aligned_mps;
1716         }
1717
1718         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1719                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1720                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1721         }
1722
1723         return true;
1724 }
1725
1726 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1727                                 struct hclge_pkt_buf_alloc *buf_alloc)
1728 {
1729         u32 i, total_size;
1730
1731         total_size = hdev->pkt_buf_size;
1732
1733         /* alloc tx buffer for all enabled TCs */
1734         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1735                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1736
1737                 if (hdev->hw_tc_map & BIT(i)) {
1738                         if (total_size < hdev->tx_buf_size)
1739                                 return -ENOMEM;
1740
1741                         priv->tx_buf_size = hdev->tx_buf_size;
1742                 } else {
1743                         priv->tx_buf_size = 0;
1744                 }
1745
1746                 total_size -= priv->tx_buf_size;
1747         }
1748
1749         return 0;
1750 }
1751
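     /* calculate the rx private buffer sizes and waterlines for all TCs;
      * @max selects the larger waterline scheme, the smaller scheme is
      * used by the caller as a fallback when the larger one does not fit
      */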
1752 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1753                                   struct hclge_pkt_buf_alloc *buf_alloc)
1754 {
1755         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1756         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1757         unsigned int i;
1758
1759         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1760                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1761
1762                 priv->enable = 0;
1763                 priv->wl.low = 0;
1764                 priv->wl.high = 0;
1765                 priv->buf_size = 0;
1766
1767                 if (!(hdev->hw_tc_map & BIT(i)))
1768                         continue;
1769
1770                 priv->enable = 1;
1771
1772                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1773                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1774                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1775                                                 HCLGE_BUF_SIZE_UNIT);
1776                 } else {
1777                         priv->wl.low = 0;
1778                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1779                                         aligned_mps;
1780                 }
1781
1782                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1783         }
1784
1785         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1786 }
1787
1788 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1789                                           struct hclge_pkt_buf_alloc *buf_alloc)
1790 {
1791         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1792         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1793         int i;
1794
1795         /* clear private buffers starting from the last TC */
1796         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1797                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1798                 unsigned int mask = BIT((unsigned int)i);
1799
1800                 if (hdev->hw_tc_map & mask &&
1801                     !(hdev->tm_info.hw_pfc_map & mask)) {
1802                         /* Clear the no pfc TC private buffer */
1803                         priv->wl.low = 0;
1804                         priv->wl.high = 0;
1805                         priv->buf_size = 0;
1806                         priv->enable = 0;
1807                         no_pfc_priv_num--;
1808                 }
1809
1810                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1811                     no_pfc_priv_num == 0)
1812                         break;
1813         }
1814
1815         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1816 }
1817
1818 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1819                                         struct hclge_pkt_buf_alloc *buf_alloc)
1820 {
1821         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1822         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1823         int i;
1824
1825         /* clear private buffers starting from the last TC */
1826         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1827                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1828                 unsigned int mask = BIT((unsigned int)i);
1829
1830                 if (hdev->hw_tc_map & mask &&
1831                     hdev->tm_info.hw_pfc_map & mask) {
1832                         /* Reduce the number of pfc TC with private buffer */
1833                         priv->wl.low = 0;
1834                         priv->enable = 0;
1835                         priv->wl.high = 0;
1836                         priv->buf_size = 0;
1837                         pfc_priv_num--;
1838                 }
1839
1840                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1841                     pfc_priv_num == 0)
1842                         break;
1843         }
1844
1845         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1846 }
1847
1848 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1849                                       struct hclge_pkt_buf_alloc *buf_alloc)
1850 {
1851 #define COMPENSATE_BUFFER       0x3C00
1852 #define COMPENSATE_HALF_MPS_NUM 5
1853 #define PRIV_WL_GAP             0x1800
1854
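             /* every enabled TC gets an equal share of the rx buffer; the
              * high waterline leaves dv_buf_size of headroom and the low
              * waterline sits PRIV_WL_GAP below the high one
              */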
1855         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1856         u32 tc_num = hclge_get_tc_num(hdev);
1857         u32 half_mps = hdev->mps >> 1;
1858         u32 min_rx_priv;
1859         unsigned int i;
1860
1861         if (tc_num)
1862                 rx_priv = rx_priv / tc_num;
1863
1864         if (tc_num <= NEED_RESERVE_TC_NUM)
1865                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1866
1867         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1868                         COMPENSATE_HALF_MPS_NUM * half_mps;
1869         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1870         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1871
1872         if (rx_priv < min_rx_priv)
1873                 return false;
1874
1875         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1876                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1877
1878                 priv->enable = 0;
1879                 priv->wl.low = 0;
1880                 priv->wl.high = 0;
1881                 priv->buf_size = 0;
1882
1883                 if (!(hdev->hw_tc_map & BIT(i)))
1884                         continue;
1885
1886                 priv->enable = 1;
1887                 priv->buf_size = rx_priv;
1888                 priv->wl.high = rx_priv - hdev->dv_buf_size;
1889                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1890         }
1891
1892         buf_alloc->s_buf.buf_size = 0;
1893
1894         return true;
1895 }
1896
1897 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1898  * @hdev: pointer to struct hclge_dev
1899  * @buf_alloc: pointer to buffer calculation data
1900  * @return: 0: calculation successful, negative: fail
1901  */
1902 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1903                                 struct hclge_pkt_buf_alloc *buf_alloc)
1904 {
1905         /* When DCB is not supported, rx private buffer is not allocated. */
1906         if (!hnae3_dev_dcb_supported(hdev)) {
1907                 u32 rx_all = hdev->pkt_buf_size;
1908
1909                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1910                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1911                         return -ENOMEM;
1912
1913                 return 0;
1914         }
1915
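             /* the schemes below are tried in turn: private buffers only,
              * both private and shared buffers with large waterlines, the
              * same with small waterlines, then dropping private buffers
              * for non-PFC TCs and finally for PFC TCs as well
              */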
1916         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1917                 return 0;
1918
1919         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1920                 return 0;
1921
1922         /* try to decrease the buffer size */
1923         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1924                 return 0;
1925
1926         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1927                 return 0;
1928
1929         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1930                 return 0;
1931
1932         return -ENOMEM;
1933 }
1934
1935 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1936                                    struct hclge_pkt_buf_alloc *buf_alloc)
1937 {
1938         struct hclge_rx_priv_buff_cmd *req;
1939         struct hclge_desc desc;
1940         int ret;
1941         int i;
1942
1943         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1944         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1945
1946         /* Alloc private buffer for each TC */
1947         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1948                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1949
1950                 req->buf_num[i] =
1951                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1952                 req->buf_num[i] |=
1953                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1954         }
1955
1956         req->shared_buf =
1957                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1958                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1959
1960         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1961         if (ret)
1962                 dev_err(&hdev->pdev->dev,
1963                         "rx private buffer alloc cmd failed %d\n", ret);
1964
1965         return ret;
1966 }
1967
1968 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1969                                    struct hclge_pkt_buf_alloc *buf_alloc)
1970 {
1971         struct hclge_rx_priv_wl_buf *req;
1972         struct hclge_priv_buf *priv;
1973         struct hclge_desc desc[2];
1974         int i, j;
1975         int ret;
1976
1977         for (i = 0; i < 2; i++) {
1978                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1979                                            false);
1980                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1981
1982                 /* The first descriptor sets the NEXT bit to 1 */
1983                 if (i == 0)
1984                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1985                 else
1986                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1987
1988                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1989                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1990
1991                         priv = &buf_alloc->priv_buf[idx];
1992                         req->tc_wl[j].high =
1993                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1994                         req->tc_wl[j].high |=
1995                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1996                         req->tc_wl[j].low =
1997                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1998                         req->tc_wl[j].low |=
1999                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2000                 }
2001         }
2002
2003         /* Send 2 descriptors at one time */
2004         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2005         if (ret)
2006                 dev_err(&hdev->pdev->dev,
2007                         "rx private waterline config cmd failed %d\n",
2008                         ret);
2009         return ret;
2010 }
2011
2012 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2013                                     struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2016         struct hclge_rx_com_thrd *req;
2017         struct hclge_desc desc[2];
2018         struct hclge_tc_thrd *tc;
2019         int i, j;
2020         int ret;
2021
2022         for (i = 0; i < 2; i++) {
2023                 hclge_cmd_setup_basic_desc(&desc[i],
2024                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2025                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2026
2027                 /* The first descriptor sets the NEXT bit to 1 */
2028                 if (i == 0)
2029                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2030                 else
2031                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2032
2033                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2034                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2035
2036                         req->com_thrd[j].high =
2037                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2038                         req->com_thrd[j].high |=
2039                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2040                         req->com_thrd[j].low =
2041                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2042                         req->com_thrd[j].low |=
2043                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2044                 }
2045         }
2046
2047         /* Send 2 descriptors at one time */
2048         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2049         if (ret)
2050                 dev_err(&hdev->pdev->dev,
2051                         "common threshold config cmd failed %d\n", ret);
2052         return ret;
2053 }
2054
2055 static int hclge_common_wl_config(struct hclge_dev *hdev,
2056                                   struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2059         struct hclge_rx_com_wl *req;
2060         struct hclge_desc desc;
2061         int ret;
2062
2063         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2064
2065         req = (struct hclge_rx_com_wl *)desc.data;
2066         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2067         req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2068
2069         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2070         req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2071
2072         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2073         if (ret)
2074                 dev_err(&hdev->pdev->dev,
2075                         "common waterline config cmd failed %d\n", ret);
2076
2077         return ret;
2078 }
2079
2080 int hclge_buffer_alloc(struct hclge_dev *hdev)
2081 {
2082         struct hclge_pkt_buf_alloc *pkt_buf;
2083         int ret;
2084
2085         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2086         if (!pkt_buf)
2087                 return -ENOMEM;
2088
2089         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2090         if (ret) {
2091                 dev_err(&hdev->pdev->dev,
2092                         "could not calc tx buffer size for all TCs %d\n", ret);
2093                 goto out;
2094         }
2095
2096         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2097         if (ret) {
2098                 dev_err(&hdev->pdev->dev,
2099                         "could not alloc tx buffers %d\n", ret);
2100                 goto out;
2101         }
2102
2103         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2104         if (ret) {
2105                 dev_err(&hdev->pdev->dev,
2106                         "could not calc rx priv buffer size for all TCs %d\n",
2107                         ret);
2108                 goto out;
2109         }
2110
2111         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2112         if (ret) {
2113                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2114                         ret);
2115                 goto out;
2116         }
2117
2118         if (hnae3_dev_dcb_supported(hdev)) {
2119                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2120                 if (ret) {
2121                         dev_err(&hdev->pdev->dev,
2122                                 "could not configure rx private waterline %d\n",
2123                                 ret);
2124                         goto out;
2125                 }
2126
2127                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2128                 if (ret) {
2129                         dev_err(&hdev->pdev->dev,
2130                                 "could not configure common threshold %d\n",
2131                                 ret);
2132                         goto out;
2133                 }
2134         }
2135
2136         ret = hclge_common_wl_config(hdev, pkt_buf);
2137         if (ret)
2138                 dev_err(&hdev->pdev->dev,
2139                         "could not configure common waterline %d\n", ret);
2140
2141 out:
2142         kfree(pkt_buf);
2143         return ret;
2144 }
2145
2146 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2147 {
2148         struct hnae3_handle *roce = &vport->roce;
2149         struct hnae3_handle *nic = &vport->nic;
2150
2151         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2152
2153         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2154             vport->back->num_msi_left == 0)
2155                 return -EINVAL;
2156
2157         roce->rinfo.base_vector = vport->back->roce_base_vector;
2158
2159         roce->rinfo.netdev = nic->kinfo.netdev;
2160         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2161
2162         roce->pdev = nic->pdev;
2163         roce->ae_algo = nic->ae_algo;
2164         roce->numa_node_mask = nic->numa_node_mask;
2165
2166         return 0;
2167 }
2168
2169 static int hclge_init_msi(struct hclge_dev *hdev)
2170 {
2171         struct pci_dev *pdev = hdev->pdev;
2172         int vectors;
2173         int i;
2174
2175         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2176                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2177         if (vectors < 0) {
2178                 dev_err(&pdev->dev,
2179                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2180                         vectors);
2181                 return vectors;
2182         }
2183         if (vectors < hdev->num_msi)
2184                 dev_warn(&hdev->pdev->dev,
2185                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2186                          hdev->num_msi, vectors);
2187
2188         hdev->num_msi = vectors;
2189         hdev->num_msi_left = vectors;
2190         hdev->base_msi_vector = pdev->irq;
2191         hdev->roce_base_vector = hdev->base_msi_vector +
2192                                 hdev->roce_base_msix_offset;
2193
2194         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2195                                            sizeof(u16), GFP_KERNEL);
2196         if (!hdev->vector_status) {
2197                 pci_free_irq_vectors(pdev);
2198                 return -ENOMEM;
2199         }
2200
2201         for (i = 0; i < hdev->num_msi; i++)
2202                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2203
2204         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2205                                         sizeof(int), GFP_KERNEL);
2206         if (!hdev->vector_irq) {
2207                 pci_free_irq_vectors(pdev);
2208                 return -ENOMEM;
2209         }
2210
2211         return 0;
2212 }
2213
2214 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2215 {
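             /* only 10M and 100M links may run at half duplex, force full
              * duplex for every other speed
              */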
2216         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2217                 duplex = HCLGE_MAC_FULL;
2218
2219         return duplex;
2220 }
2221
2222 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2223                                       u8 duplex)
2224 {
2225         struct hclge_config_mac_speed_dup_cmd *req;
2226         struct hclge_desc desc;
2227         int ret;
2228
2229         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2230
2231         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2232
2233         if (duplex)
2234                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2235
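             /* transform the speed into the encoding the firmware expects:
              * 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7
              */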
2236         switch (speed) {
2237         case HCLGE_MAC_SPEED_10M:
2238                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2239                                 HCLGE_CFG_SPEED_S, 6);
2240                 break;
2241         case HCLGE_MAC_SPEED_100M:
2242                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2243                                 HCLGE_CFG_SPEED_S, 7);
2244                 break;
2245         case HCLGE_MAC_SPEED_1G:
2246                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2247                                 HCLGE_CFG_SPEED_S, 0);
2248                 break;
2249         case HCLGE_MAC_SPEED_10G:
2250                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2251                                 HCLGE_CFG_SPEED_S, 1);
2252                 break;
2253         case HCLGE_MAC_SPEED_25G:
2254                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2255                                 HCLGE_CFG_SPEED_S, 2);
2256                 break;
2257         case HCLGE_MAC_SPEED_40G:
2258                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2259                                 HCLGE_CFG_SPEED_S, 3);
2260                 break;
2261         case HCLGE_MAC_SPEED_50G:
2262                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2263                                 HCLGE_CFG_SPEED_S, 4);
2264                 break;
2265         case HCLGE_MAC_SPEED_100G:
2266                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2267                                 HCLGE_CFG_SPEED_S, 5);
2268                 break;
2269         default:
2270                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2271                 return -EINVAL;
2272         }
2273
2274         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2275                       1);
2276
2277         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2278         if (ret) {
2279                 dev_err(&hdev->pdev->dev,
2280                         "mac speed/duplex config cmd failed %d.\n", ret);
2281                 return ret;
2282         }
2283
2284         return 0;
2285 }
2286
2287 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2288 {
2289         int ret;
2290
2291         duplex = hclge_check_speed_dup(duplex, speed);
2292         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2293                 return 0;
2294
2295         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2296         if (ret)
2297                 return ret;
2298
2299         hdev->hw.mac.speed = speed;
2300         hdev->hw.mac.duplex = duplex;
2301
2302         return 0;
2303 }
2304
2305 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2306                                      u8 duplex)
2307 {
2308         struct hclge_vport *vport = hclge_get_vport(handle);
2309         struct hclge_dev *hdev = vport->back;
2310
2311         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2312 }
2313
2314 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2315 {
2316         struct hclge_config_auto_neg_cmd *req;
2317         struct hclge_desc desc;
2318         u32 flag = 0;
2319         int ret;
2320
2321         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2322
2323         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2324         if (enable)
2325                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2326         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2327
2328         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2329         if (ret)
2330                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2331                         ret);
2332
2333         return ret;
2334 }
2335
2336 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2337 {
2338         struct hclge_vport *vport = hclge_get_vport(handle);
2339         struct hclge_dev *hdev = vport->back;
2340
2341         if (!hdev->hw.mac.support_autoneg) {
2342                 if (enable) {
2343                         dev_err(&hdev->pdev->dev,
2344                                 "autoneg is not supported by current port\n");
2345                         return -EOPNOTSUPP;
2346                 } else {
2347                         return 0;
2348                 }
2349         }
2350
2351         return hclge_set_autoneg_en(hdev, enable);
2352 }
2353
2354 static int hclge_get_autoneg(struct hnae3_handle *handle)
2355 {
2356         struct hclge_vport *vport = hclge_get_vport(handle);
2357         struct hclge_dev *hdev = vport->back;
2358         struct phy_device *phydev = hdev->hw.mac.phydev;
2359
2360         if (phydev)
2361                 return phydev->autoneg;
2362
2363         return hdev->hw.mac.autoneg;
2364 }
2365
2366 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2367 {
2368         struct hclge_vport *vport = hclge_get_vport(handle);
2369         struct hclge_dev *hdev = vport->back;
2370         int ret;
2371
2372         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2373
2374         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2375         if (ret)
2376                 return ret;
2377         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2378 }
2379
2380 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2381 {
2382         struct hclge_vport *vport = hclge_get_vport(handle);
2383         struct hclge_dev *hdev = vport->back;
2384
2385         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2386                 return hclge_set_autoneg_en(hdev, !halt);
2387
2388         return 0;
2389 }
2390
2391 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2392 {
2393         struct hclge_config_fec_cmd *req;
2394         struct hclge_desc desc;
2395         int ret;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2398
2399         req = (struct hclge_config_fec_cmd *)desc.data;
2400         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2401                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2402         if (fec_mode & BIT(HNAE3_FEC_RS))
2403                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2404                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2405         if (fec_mode & BIT(HNAE3_FEC_BASER))
2406                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2407                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2408
2409         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2410         if (ret)
2411                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2412
2413         return ret;
2414 }
2415
2416 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2417 {
2418         struct hclge_vport *vport = hclge_get_vport(handle);
2419         struct hclge_dev *hdev = vport->back;
2420         struct hclge_mac *mac = &hdev->hw.mac;
2421         int ret;
2422
2423         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2424                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2425                 return -EINVAL;
2426         }
2427
2428         ret = hclge_set_fec_hw(hdev, fec_mode);
2429         if (ret)
2430                 return ret;
2431
2432         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2433         return 0;
2434 }
2435
2436 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2437                           u8 *fec_mode)
2438 {
2439         struct hclge_vport *vport = hclge_get_vport(handle);
2440         struct hclge_dev *hdev = vport->back;
2441         struct hclge_mac *mac = &hdev->hw.mac;
2442
2443         if (fec_ability)
2444                 *fec_ability = mac->fec_ability;
2445         if (fec_mode)
2446                 *fec_mode = mac->fec_mode;
2447 }
2448
2449 static int hclge_mac_init(struct hclge_dev *hdev)
2450 {
2451         struct hclge_mac *mac = &hdev->hw.mac;
2452         int ret;
2453
2454         hdev->support_sfp_query = true;
2455         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2456         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2457                                          hdev->hw.mac.duplex);
2458         if (ret) {
2459                 dev_err(&hdev->pdev->dev,
2460                         "Config mac speed dup fail ret=%d\n", ret);
2461                 return ret;
2462         }
2463
2464         if (hdev->hw.mac.support_autoneg) {
2465                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2466                 if (ret) {
2467                         dev_err(&hdev->pdev->dev,
2468                                 "Config mac autoneg fail ret=%d\n", ret);
2469                         return ret;
2470                 }
2471         }
2472
2473         mac->link = 0;
2474
2475         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2476                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2477                 if (ret) {
2478                         dev_err(&hdev->pdev->dev,
2479                                 "Fec mode init fail, ret = %d\n", ret);
2480                         return ret;
2481                 }
2482         }
2483
2484         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2485         if (ret) {
2486                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2487                 return ret;
2488         }
2489
2490         ret = hclge_buffer_alloc(hdev);
2491         if (ret)
2492                 dev_err(&hdev->pdev->dev,
2493                         "allocate buffer fail, ret=%d\n", ret);
2494
2495         return ret;
2496 }
2497
2498 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2499 {
2500         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2501             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2502                 schedule_work(&hdev->mbx_service_task);
2503 }
2504
2505 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2506 {
2507         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2508             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2509                 schedule_work(&hdev->rst_service_task);
2510 }
2511
2512 static void hclge_task_schedule(struct hclge_dev *hdev)
2513 {
2514         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2515             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2516             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2517                 (void)schedule_work(&hdev->service_task);
2518 }
2519
2520 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2521 {
2522         struct hclge_link_status_cmd *req;
2523         struct hclge_desc desc;
2524         int link_status;
2525         int ret;
2526
2527         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2528         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2529         if (ret) {
2530                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2531                         ret);
2532                 return ret;
2533         }
2534
2535         req = (struct hclge_link_status_cmd *)desc.data;
2536         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2537
2538         return !!link_status;
2539 }
2540
2541 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2542 {
2543         unsigned int mac_state;
2544         int link_stat;
2545
2546         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2547                 return 0;
2548
2549         mac_state = hclge_get_mac_link_status(hdev);
2550
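             /* when a PHY is attached, report link up only if both the MAC
              * and the running PHY see the link as up
              */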
2551         if (hdev->hw.mac.phydev) {
2552                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2553                         link_stat = mac_state &
2554                                 hdev->hw.mac.phydev->link;
2555                 else
2556                         link_stat = 0;
2557
2558         } else {
2559                 link_stat = mac_state;
2560         }
2561
2562         return !!link_stat;
2563 }
2564
2565 static void hclge_update_link_status(struct hclge_dev *hdev)
2566 {
2567         struct hnae3_client *rclient = hdev->roce_client;
2568         struct hnae3_client *client = hdev->nic_client;
2569         struct hnae3_handle *rhandle;
2570         struct hnae3_handle *handle;
2571         int state;
2572         int i;
2573
2574         if (!client)
2575                 return;
2576         state = hclge_get_mac_phy_link(hdev);
2577         if (state != hdev->hw.mac.link) {
2578                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2579                         handle = &hdev->vport[i].nic;
2580                         client->ops->link_status_change(handle, state);
2581                         hclge_config_mac_tnl_int(hdev, state);
2582                         rhandle = &hdev->vport[i].roce;
2583                         if (rclient && rclient->ops->link_status_change)
2584                                 rclient->ops->link_status_change(rhandle,
2585                                                                  state);
2586                 }
2587                 hdev->hw.mac.link = state;
2588         }
2589 }
2590
2591 static void hclge_update_port_capability(struct hclge_mac *mac)
2592 {
2593         /* update fec ability by speed */
2594         hclge_convert_setting_fec(mac);
2595
2596         /* firmware can not identify the backplane type, the media type
2597          * read from the configuration can help to deal with it
2598          */
2599         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2600             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2601                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2602         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2603                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2604
2605         if (mac->support_autoneg) {
2606                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2607                 linkmode_copy(mac->advertising, mac->supported);
2608         } else {
2609                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2610                                    mac->supported);
2611                 linkmode_zero(mac->advertising);
2612         }
2613 }
2614
2615 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2616 {
2617         struct hclge_sfp_info_cmd *resp;
2618         struct hclge_desc desc;
2619         int ret;
2620
2621         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2622         resp = (struct hclge_sfp_info_cmd *)desc.data;
2623         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2624         if (ret == -EOPNOTSUPP) {
2625                 dev_warn(&hdev->pdev->dev,
2626                          "IMP does not support get SFP speed %d\n", ret);
2627                 return ret;
2628         } else if (ret) {
2629                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2630                 return ret;
2631         }
2632
2633         *speed = le32_to_cpu(resp->speed);
2634
2635         return 0;
2636 }
2637
2638 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2639 {
2640         struct hclge_sfp_info_cmd *resp;
2641         struct hclge_desc desc;
2642         int ret;
2643
2644         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2645         resp = (struct hclge_sfp_info_cmd *)desc.data;
2646
2647         resp->query_type = QUERY_ACTIVE_SPEED;
2648
2649         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2650         if (ret == -EOPNOTSUPP) {
2651                 dev_warn(&hdev->pdev->dev,
2652                          "IMP does not support get SFP info %d\n", ret);
2653                 return ret;
2654         } else if (ret) {
2655                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2656                 return ret;
2657         }
2658
2659         mac->speed = le32_to_cpu(resp->speed);
2660         /* if resp->speed_ability is 0, it means the firmware is an old
2661          * version, so do not update these params
2662          */
2663         if (resp->speed_ability) {
2664                 mac->module_type = le32_to_cpu(resp->module_type);
2665                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2666                 mac->autoneg = resp->autoneg;
2667                 mac->support_autoneg = resp->autoneg_ability;
2668                 mac->speed_type = QUERY_ACTIVE_SPEED;
2669                 if (!resp->active_fec)
2670                         mac->fec_mode = 0;
2671                 else
2672                         mac->fec_mode = BIT(resp->active_fec);
2673         } else {
2674                 mac->speed_type = QUERY_SFP_SPEED;
2675         }
2676
2677         return 0;
2678 }
2679
2680 static int hclge_update_port_info(struct hclge_dev *hdev)
2681 {
2682         struct hclge_mac *mac = &hdev->hw.mac;
2683         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2684         int ret;
2685
2686         /* get the port info from SFP cmd if not copper port */
2687         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2688                 return 0;
2689
2690         /* if IMP does not support getting the SFP/qSFP info, return directly */
2691         if (!hdev->support_sfp_query)
2692                 return 0;
2693
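             /* HW revision 0x21 and later can query the full SFP info,
              * older revisions only report the raw SFP speed
              */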
2694         if (hdev->pdev->revision >= 0x21)
2695                 ret = hclge_get_sfp_info(hdev, mac);
2696         else
2697                 ret = hclge_get_sfp_speed(hdev, &speed);
2698
2699         if (ret == -EOPNOTSUPP) {
2700                 hdev->support_sfp_query = false;
2701                 return ret;
2702         } else if (ret) {
2703                 return ret;
2704         }
2705
2706         if (hdev->pdev->revision >= 0x21) {
2707                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2708                         hclge_update_port_capability(mac);
2709                         return 0;
2710                 }
2711                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2712                                                HCLGE_MAC_FULL);
2713         } else {
2714                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2715                         return 0; /* do nothing if no SFP */
2716
2717                 /* must config full duplex for SFP */
2718                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2719         }
2720 }
2721
2722 static int hclge_get_status(struct hnae3_handle *handle)
2723 {
2724         struct hclge_vport *vport = hclge_get_vport(handle);
2725         struct hclge_dev *hdev = vport->back;
2726
2727         hclge_update_link_status(hdev);
2728
2729         return hdev->hw.mac.link;
2730 }
2731
2732 static void hclge_service_timer(struct timer_list *t)
2733 {
2734         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2735
2736         mod_timer(&hdev->service_timer, jiffies + HZ);
2737         hdev->hw_stats.stats_timer++;
2738         hdev->fd_arfs_expire_timer++;
2739         hclge_task_schedule(hdev);
2740 }
2741
2742 static void hclge_service_complete(struct hclge_dev *hdev)
2743 {
2744         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2745
2746         /* Flush memory before next watchdog */
2747         smp_mb__before_atomic();
2748         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2749 }
2750
2751 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2752 {
2753         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2754
2755         /* fetch the events from their corresponding regs */
2756         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2757         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2758         msix_src_reg = hclge_read_dev(&hdev->hw,
2759                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2760
2761         /* Assumption: if by any chance reset and mailbox events are reported
2762          * together, we will only process the reset event in this go and will
2763          * defer the processing of the mailbox events. Since we would not have
2764          * cleared the RX CMDQ event this time, we would receive another
2765          * interrupt from H/W just for the mailbox.
2766          */
2767
2768         /* check for vector0 reset event sources */
2769         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2770                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2771                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2772                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2773                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2774                 hdev->rst_stats.imp_rst_cnt++;
2775                 return HCLGE_VECTOR0_EVENT_RST;
2776         }
2777
2778         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2779                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2780                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2781                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2782                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2783                 hdev->rst_stats.global_rst_cnt++;
2784                 return HCLGE_VECTOR0_EVENT_RST;
2785         }
2786
2787         /* check for vector0 msix event source */
2788         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2789                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2790                          msix_src_reg);
2791                 *clearval = msix_src_reg;
2792                 return HCLGE_VECTOR0_EVENT_ERR;
2793         }
2794
2795         /* check for vector0 mailbox(=CMDQ RX) event source */
2796         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2797                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2798                 *clearval = cmdq_src_reg;
2799                 return HCLGE_VECTOR0_EVENT_MBX;
2800         }
2801
2802         /* print other vector0 event source */
2803         dev_info(&hdev->pdev->dev,
2804                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
2805                  cmdq_src_reg, msix_src_reg);
2806         *clearval = msix_src_reg;
2807
2808         return HCLGE_VECTOR0_EVENT_OTHER;
2809 }
2810
2811 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2812                                     u32 regclr)
2813 {
2814         switch (event_type) {
2815         case HCLGE_VECTOR0_EVENT_RST:
2816                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2817                 break;
2818         case HCLGE_VECTOR0_EVENT_MBX:
2819                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2820                 break;
2821         default:
2822                 break;
2823         }
2824 }
2825
2826 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2827 {
2828         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2829                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2830                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2831                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2832         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2833 }
2834
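/* Enable or disable the misc (vector0) interrupt by writing 1 or 0 to the
 * vector's address register.
 */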
2835 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2836 {
2837         writel(enable ? 1 : 0, vector->addr);
2838 }
2839
2840 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2841 {
2842         struct hclge_dev *hdev = data;
2843         u32 clearval = 0;
2844         u32 event_cause;
2845
2846         hclge_enable_vector(&hdev->misc_vector, false);
2847         event_cause = hclge_check_event_cause(hdev, &clearval);
2848
2849         /* vector 0 interrupt is shared with reset and mailbox source events. */
2850         switch (event_cause) {
2851         case HCLGE_VECTOR0_EVENT_ERR:
2852                 /* We do not know what type of reset is required now. This
2853                  * can only be decided after we fetch the type of errors
2854                  * which caused this event. Therefore, for now we:
2855                  * 1. Assert the HNAE3_UNKNOWN_RESET type of reset, which
2856                  *    means the actual reset type is deferred.
2857                  * 2. Schedule the reset service task.
2858                  * 3. When the service task receives HNAE3_UNKNOWN_RESET,
2859                  *    it fetches the correct type of reset by first
2860                  *    decoding the types of errors.
2861                  */
2862                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2863                 /* fall through */
2864         case HCLGE_VECTOR0_EVENT_RST:
2865                 hclge_reset_task_schedule(hdev);
2866                 break;
2867         case HCLGE_VECTOR0_EVENT_MBX:
2868                 /* If we are here then either:
2869                  * 1. We are not handling any mbx task and we are not
2870                  *    scheduled to do so, or
2871                  * 2. We are handling an mbx task but nothing more is
2872                  *    scheduled.
2873                  * In both cases we should schedule the mbx task, as there
2874                  * are more mbx messages reported by this interrupt.
2875                  */
2877                 hclge_mbx_task_schedule(hdev);
2878                 break;
2879         default:
2880                 dev_warn(&hdev->pdev->dev,
2881                          "received unknown or unhandled event of vector0\n");
2882                 break;
2883         }
2884
2885         /* clear the interrupt source if it is not caused by reset */
2886         if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2888                 hclge_clear_event_cause(hdev, event_cause, clearval);
2889                 hclge_enable_vector(&hdev->misc_vector, true);
2890         }
2891
2892         return IRQ_HANDLED;
2893 }
2894
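/* Return a vector to the free pool and update the MSI accounting. Freeing
 * an already-freed vector only triggers a warning.
 */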
2895 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2896 {
2897         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2898                 dev_warn(&hdev->pdev->dev,
2899                          "vector(vector_id %d) has been freed.\n", vector_id);
2900                 return;
2901         }
2902
2903         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2904         hdev->num_msi_left += 1;
2905         hdev->num_msi_used -= 1;
2906 }
2907
2908 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2909 {
2910         struct hclge_misc_vector *vector = &hdev->misc_vector;
2911
2912         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2913
2914         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2915         hdev->vector_status[0] = 0;
2916
2917         hdev->num_msi_left -= 1;
2918         hdev->num_msi_used += 1;
2919 }
2920
2921 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2922 {
2923         int ret;
2924
2925         hclge_get_misc_vector(hdev);
2926
2927         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2928         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2929                           0, "hclge_misc", hdev);
2930         if (ret) {
2931                 hclge_free_vector(hdev, 0);
2932                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2933                         hdev->misc_vector.vector_irq);
2934         }
2935
2936         return ret;
2937 }
2938
2939 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2940 {
2941         free_irq(hdev->misc_vector.vector_irq, hdev);
2942         hclge_free_vector(hdev, 0);
2943 }
2944
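/* Propagate a reset notification to the NIC client of each vport, stopping
 * at the first vport whose notification fails.
 */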
2945 int hclge_notify_client(struct hclge_dev *hdev,
2946                         enum hnae3_reset_notify_type type)
2947 {
2948         struct hnae3_client *client = hdev->nic_client;
2949         u16 i;
2950
2951         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2952                 return 0;
2953
2954         if (!client->ops->reset_notify)
2955                 return -EOPNOTSUPP;
2956
2957         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2958                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2959                 int ret;
2960
2961                 ret = client->ops->reset_notify(handle, type);
2962                 if (ret) {
2963                         dev_err(&hdev->pdev->dev,
2964                                 "notify nic client failed %d(%d)\n", type, ret);
2965                         return ret;
2966                 }
2967         }
2968
2969         return 0;
2970 }
2971
2972 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2973                                     enum hnae3_reset_notify_type type)
2974 {
2975         struct hnae3_client *client = hdev->roce_client;
2976         int ret = 0;
2977         u16 i;
2978
2979         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2980                 return 0;
2981
2982         if (!client->ops->reset_notify)
2983                 return -EOPNOTSUPP;
2984
2985         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2986                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2987
2988                 ret = client->ops->reset_notify(handle, type);
2989                 if (ret) {
2990                         dev_err(&hdev->pdev->dev,
2991                                 "notify roce client failed %d(%d)\n",
2992                                 type, ret);
2993                         return ret;
2994                 }
2995         }
2996
2997         return ret;
2998 }
2999
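/* Poll the hardware reset status until the reset completes. With
 * HCLGE_RESET_WAIT_CNT polls of HCLGE_RESET_WAIT_MS each, the wait is
 * bounded at roughly 20 seconds (200 * 100 ms) before returning -EBUSY.
 */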
3000 static int hclge_reset_wait(struct hclge_dev *hdev)
3001 {
3002 #define HCLGE_RESET_WAIT_MS     100
3003 #define HCLGE_RESET_WAIT_CNT    200
3004         u32 val, reg, reg_bit;
3005         u32 cnt = 0;
3006
3007         switch (hdev->reset_type) {
3008         case HNAE3_IMP_RESET:
3009                 reg = HCLGE_GLOBAL_RESET_REG;
3010                 reg_bit = HCLGE_IMP_RESET_BIT;
3011                 break;
3012         case HNAE3_GLOBAL_RESET:
3013                 reg = HCLGE_GLOBAL_RESET_REG;
3014                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3015                 break;
3016         case HNAE3_FUNC_RESET:
3017                 reg = HCLGE_FUN_RST_ING;
3018                 reg_bit = HCLGE_FUN_RST_ING_B;
3019                 break;
3020         case HNAE3_FLR_RESET:
3021                 break;
3022         default:
3023                 dev_err(&hdev->pdev->dev,
3024                         "Wait for unsupported reset type: %d\n",
3025                         hdev->reset_type);
3026                 return -EINVAL;
3027         }
3028
3029         if (hdev->reset_type == HNAE3_FLR_RESET) {
3030                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3031                        cnt++ < HCLGE_RESET_WAIT_CNT)
3032                         msleep(HCLGE_RESET_WAIT_MS);
3033
3034                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3035                         dev_err(&hdev->pdev->dev,
3036                                 "flr wait timeout: %d\n", cnt);
3037                         return -EBUSY;
3038                 }
3039
3040                 return 0;
3041         }
3042
3043         val = hclge_read_dev(&hdev->hw, reg);
3044         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3045                 msleep(HCLGE_RESET_WAIT_MS);
3046                 val = hclge_read_dev(&hdev->hw, reg);
3047                 cnt++;
3048         }
3049
3050         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3051                 dev_warn(&hdev->pdev->dev,
3052                          "Wait for reset timeout: %d\n", hdev->reset_type);
3053                 return -EBUSY;
3054         }
3055
3056         return 0;
3057 }
3058
3059 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3060 {
3061         struct hclge_vf_rst_cmd *req;
3062         struct hclge_desc desc;
3063
3064         req = (struct hclge_vf_rst_cmd *)desc.data;
3065         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3066         req->dest_vfid = func_id;
3067
3068         if (reset)
3069                 req->vf_rst = 0x1;
3070
3071         return hclge_cmd_send(&hdev->hw, &desc, 1);
3072 }
3073
3074 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3075 {
3076         int i;
3077
3078         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3079                 struct hclge_vport *vport = &hdev->vport[i];
3080                 int ret;
3081
3082                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3083                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3084                 if (ret) {
3085                         dev_err(&hdev->pdev->dev,
3086                                 "set vf(%d) rst failed %d!\n",
3087                                 vport->vport_id, ret);
3088                         return ret;
3089                 }
3090
3091                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3092                         continue;
3093
3094                 /* Inform VF to process the reset.
3095                  * hclge_inform_reset_assert_to_vf may fail if VF
3096                  * driver is not loaded.
3097                  */
3098                 ret = hclge_inform_reset_assert_to_vf(vport);
3099                 if (ret)
3100                         dev_warn(&hdev->pdev->dev,
3101                                  "inform reset to vf(%d) failed %d!\n",
3102                                  vport->vport_id, ret);
3103         }
3104
3105         return 0;
3106 }
3107
3108 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3109 {
3110         struct hclge_desc desc;
3111         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3112         int ret;
3113
3114         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3115         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3116         req->fun_reset_vfid = func_id;
3117
3118         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3119         if (ret)
3120                 dev_err(&hdev->pdev->dev,
3121                         "send function reset cmd fail, status = %d\n", ret);
3122
3123         return ret;
3124 }
3125
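/* Trigger the currently requested reset. A global reset is asserted
 * directly via HCLGE_GLOBAL_RESET_REG; function-level resets (PF reset,
 * FLR) are re-queued on reset_pending so the reset task can check them
 * again later. If hardware is still busy with an earlier reset, do nothing.
 */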
3126 static void hclge_do_reset(struct hclge_dev *hdev)
3127 {
3128         struct hnae3_handle *handle = &hdev->vport[0].nic;
3129         struct pci_dev *pdev = hdev->pdev;
3130         u32 val;
3131
3132         if (hclge_get_hw_reset_stat(handle)) {
3133                 dev_info(&pdev->dev, "Hardware reset is not finished\n");
3134                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3135                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3136                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3137                 return;
3138         }
3139
3140         switch (hdev->reset_type) {
3141         case HNAE3_GLOBAL_RESET:
3142                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3143                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3144                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3145                 dev_info(&pdev->dev, "Global Reset requested\n");
3146                 break;
3147         case HNAE3_FUNC_RESET:
3148                 dev_info(&pdev->dev, "PF Reset requested\n");
3149                 /* schedule again to check later */
3150                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3151                 hclge_reset_task_schedule(hdev);
3152                 break;
3153         case HNAE3_FLR_RESET:
3154                 dev_info(&pdev->dev, "FLR requested\n");
3155                 /* schedule again to check later */
3156                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3157                 hclge_reset_task_schedule(hdev);
3158                 break;
3159         default:
3160                 dev_warn(&pdev->dev,
3161                          "Unsupported reset type: %d\n", hdev->reset_type);
3162                 break;
3163         }
3164 }
3165
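/* Select the highest-priority reset level pending in @addr
 * (IMP > global > func > FLR), clearing that level along with any lower
 * levels that the chosen reset implicitly covers.
 */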
3166 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3167                                                    unsigned long *addr)
3168 {
3169         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3170         struct hclge_dev *hdev = ae_dev->priv;
3171
3172         /* first, resolve any unknown reset type to the known type(s) */
3173         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3174                 /* we will intentionally ignore any errors from this function
3175                  * as we will end up in *some* reset request in any case
3176                  */
3177                 hclge_handle_hw_msix_error(hdev, addr);
3178                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3179                 /* We deferred the clearing of the error event which caused
3180                  * the interrupt, since it was not possible to do that in
3181                  * interrupt context (this is the reason we introduced the
3182                  * new UNKNOWN reset type). Now that the errors have been
3183                  * handled and cleared in hardware, we can safely re-enable
3184                  * interrupts. This is an exception to the norm.
3185                  */
3186                 hclge_enable_vector(&hdev->misc_vector, true);
3187         }
3188
3189         /* return the highest priority reset level amongst all */
3190         if (test_bit(HNAE3_IMP_RESET, addr)) {
3191                 rst_level = HNAE3_IMP_RESET;
3192                 clear_bit(HNAE3_IMP_RESET, addr);
3193                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3194                 clear_bit(HNAE3_FUNC_RESET, addr);
3195         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3196                 rst_level = HNAE3_GLOBAL_RESET;
3197                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3198                 clear_bit(HNAE3_FUNC_RESET, addr);
3199         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3200                 rst_level = HNAE3_FUNC_RESET;
3201                 clear_bit(HNAE3_FUNC_RESET, addr);
3202         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3203                 rst_level = HNAE3_FLR_RESET;
3204                 clear_bit(HNAE3_FLR_RESET, addr);
3205         }
3206
3207         if (hdev->reset_type != HNAE3_NONE_RESET &&
3208             rst_level < hdev->reset_type)
3209                 return HNAE3_NONE_RESET;
3210
3211         return rst_level;
3212 }
3213
3214 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3215 {
3216         u32 clearval = 0;
3217
3218         switch (hdev->reset_type) {
3219         case HNAE3_IMP_RESET:
3220                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3221                 break;
3222         case HNAE3_GLOBAL_RESET:
3223                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3224                 break;
3225         default:
3226                 break;
3227         }
3228
3229         if (!clearval)
3230                 return;
3231
3232         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3233         hclge_enable_vector(&hdev->misc_vector, true);
3234 }
3235
3236 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3237 {
3238         int ret = 0;
3239
3240         switch (hdev->reset_type) {
3241         case HNAE3_FUNC_RESET:
3242                 /* fall through */
3243         case HNAE3_FLR_RESET:
3244                 ret = hclge_set_all_vf_rst(hdev, true);
3245                 break;
3246         default:
3247                 break;
3248         }
3249
3250         return ret;
3251 }
3252
3253 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3254 {
3255 #define HCLGE_RESET_SYNC_TIME 100
3256
3257         u32 reg_val;
3258         int ret = 0;
3259
3260         switch (hdev->reset_type) {
3261         case HNAE3_FUNC_RESET:
3262                 /* There is no mechanism for the PF to know whether the VF
3263                  * has stopped IO; for now, just wait 100 ms for it to do so
3264                  */
3265                 msleep(HCLGE_RESET_SYNC_TIME);
3266                 ret = hclge_func_reset_cmd(hdev, 0);
3267                 if (ret) {
3268                         dev_err(&hdev->pdev->dev,
3269                                 "asserting function reset fail %d!\n", ret);
3270                         return ret;
3271                 }
3272
3273                 /* After performing PF reset, it is not necessary to do the
3274                  * mailbox handling or send any command to firmware, because
3275                  * any mailbox handling or command to firmware is only valid
3276                  * after hclge_cmd_init is called.
3277                  */
3278                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3279                 hdev->rst_stats.pf_rst_cnt++;
3280                 break;
3281         case HNAE3_FLR_RESET:
3282                 /* There is no mechanism for the PF to know whether the VF
3283                  * has stopped IO; for now, just wait 100 ms for it to do so
3284                  */
3285                 msleep(HCLGE_RESET_SYNC_TIME);
3286                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3287                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3288                 hdev->rst_stats.flr_rst_cnt++;
3289                 break;
3290         case HNAE3_IMP_RESET:
3291                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3292                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3293                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3294                 break;
3295         default:
3296                 break;
3297         }
3298
3299         /* inform hardware that preparatory work is done */
3300         msleep(HCLGE_RESET_SYNC_TIME);
3301         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3302                         HCLGE_NIC_CMQ_ENABLE);
3303         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3304
3305         return ret;
3306 }
3307
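/* Decide whether the failed reset should be retried. Returns true when the
 * reset task should be re-scheduled (another reset is pending, or the
 * hardware wait simply timed out); otherwise the failure is either deferred
 * to a pending IMP reset, escalated to a global reset via the reset timer,
 * or given up on after MAX_RESET_FAIL_CNT attempts.
 */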
3308 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3309 {
3310 #define MAX_RESET_FAIL_CNT 5
3311
3312         if (hdev->reset_pending) {
3313                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3314                          hdev->reset_pending);
3315                 return true;
3316         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3317                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3318                     BIT(HCLGE_IMP_RESET_BIT))) {
3319                 dev_info(&hdev->pdev->dev,
3320                          "reset failed because IMP Reset is pending\n");
3321                 hclge_clear_reset_cause(hdev);
3322                 return false;
3323         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3324                 hdev->reset_fail_cnt++;
3325                 if (is_timeout) {
3326                         set_bit(hdev->reset_type, &hdev->reset_pending);
3327                         dev_info(&hdev->pdev->dev,
3328                                  "re-schedule to wait for hw reset done\n");
3329                         return true;
3330                 }
3331
3332                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3333                 hclge_clear_reset_cause(hdev);
3334                 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3335                 mod_timer(&hdev->reset_timer,
3336                           jiffies + HCLGE_RESET_INTERVAL);
3337
3338                 return false;
3339         }
3340
3341         hclge_clear_reset_cause(hdev);
3342         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3343         return false;
3344 }
3345
3346 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3347 {
3348         int ret = 0;
3349
3350         switch (hdev->reset_type) {
3351         case HNAE3_FUNC_RESET:
3352                 /* fall through */
3353         case HNAE3_FLR_RESET:
3354                 ret = hclge_set_all_vf_rst(hdev, false);
3355                 break;
3356         default:
3357                 break;
3358         }
3359
3360         return ret;
3361 }
3362
3363 static int hclge_reset_stack(struct hclge_dev *hdev)
3364 {
3365         int ret;
3366
3367         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3368         if (ret)
3369                 return ret;
3370
3371         ret = hclge_reset_ae_dev(hdev->ae_dev);
3372         if (ret)
3373                 return ret;
3374
3375         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3376         if (ret)
3377                 return ret;
3378
3379         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3380 }
3381
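/* Perform the full reset sequence: notify the RoCE and NIC clients down,
 * prepare and assert the hardware reset, wait for it to finish, then
 * rebuild the stack (uninit/init/restore) and bring the clients back up.
 * Any failure is handed to hclge_reset_err_handle() to decide whether to
 * retry.
 */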
3382 static void hclge_reset(struct hclge_dev *hdev)
3383 {
3384         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3385         bool is_timeout = false;
3386         int ret;
3387
3388         /* Initialize the ae_dev reset status as well, in case the enet
3389          * layer wants to know if the device is undergoing reset
3390          */
3391         ae_dev->reset_type = hdev->reset_type;
3392         hdev->rst_stats.reset_cnt++;
3393         /* perform reset of the stack & ae device for a client */
3394         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3395         if (ret)
3396                 goto err_reset;
3397
3398         ret = hclge_reset_prepare_down(hdev);
3399         if (ret)
3400                 goto err_reset;
3401
3402         rtnl_lock();
3403         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3404         if (ret)
3405                 goto err_reset_lock;
3406
3407         rtnl_unlock();
3408
3409         ret = hclge_reset_prepare_wait(hdev);
3410         if (ret)
3411                 goto err_reset;
3412
3413         if (hclge_reset_wait(hdev)) {
3414                 is_timeout = true;
3415                 goto err_reset;
3416         }
3417
3418         hdev->rst_stats.hw_reset_done_cnt++;
3419
3420         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3421         if (ret)
3422                 goto err_reset;
3423
3424         rtnl_lock();
3425
3426         ret = hclge_reset_stack(hdev);
3427         if (ret)
3428                 goto err_reset_lock;
3429
3430         hclge_clear_reset_cause(hdev);
3431
3432         ret = hclge_reset_prepare_up(hdev);
3433         if (ret)
3434                 goto err_reset_lock;
3435
3436         rtnl_unlock();
3437
3438         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3439         /* ignore the RoCE notify error until it has failed
3440          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3441          */
3442         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3443                 goto err_reset;
3444
3445         rtnl_lock();
3446
3447         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3448         if (ret)
3449                 goto err_reset_lock;
3450
3451         rtnl_unlock();
3452
3453         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3454         if (ret)
3455                 goto err_reset;
3456
3457         hdev->last_reset_time = jiffies;
3458         hdev->reset_fail_cnt = 0;
3459         hdev->rst_stats.reset_done_cnt++;
3460         ae_dev->reset_type = HNAE3_NONE_RESET;
3461         del_timer(&hdev->reset_timer);
3462
3463         return;
3464
3465 err_reset_lock:
3466         rtnl_unlock();
3467 err_reset:
3468         if (hclge_reset_err_handle(hdev, is_timeout))
3469                 hclge_reset_task_schedule(hdev);
3470 }
3471
3472 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3473 {
3474         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3475         struct hclge_dev *hdev = ae_dev->priv;
3476
3477         /* We might end up getting called broadly because of the 2 cases below:
3478          * 1. A recoverable error was conveyed through APEI and the only way
3479          *    to bring back normalcy is to reset.
3480          * 2. A new reset request from the stack due to timeout.
3481          *
3482          * In the first case, the error event might not have an ae handle
3483          * available. Check whether this is a new reset request and we are not
3484          * here just because the last reset attempt did not succeed and the
3485          * watchdog hit us again. We know this if the last reset request did
3486          * not occur very recently (watchdog timer = 5*HZ; check after a
3487          * sufficiently large time, say 4*5*HZ). For a new request we reset
3488          * the "reset level" to PF reset. If it is a repeat of the most recent
3489          * request, we throttle it and do not allow it again before
3490          * HCLGE_RESET_INTERVAL has elapsed.
3491          */
3492         if (!handle)
3493                 handle = &hdev->vport[0].nic;
3494
3495         if (time_before(jiffies, (hdev->last_reset_time +
3496                                   HCLGE_RESET_INTERVAL)))
3497                 return;
3498         else if (hdev->default_reset_request)
3499                 hdev->reset_level =
3500                         hclge_get_reset_level(ae_dev,
3501                                               &hdev->default_reset_request);
3502         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3503                 hdev->reset_level = HNAE3_FUNC_RESET;
3504
3505         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3506                  hdev->reset_level);
3507
3508         /* request reset & schedule reset task */
3509         set_bit(hdev->reset_level, &hdev->reset_request);
3510         hclge_reset_task_schedule(hdev);
3511
3512         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3513                 hdev->reset_level++;
3514 }
3515
3516 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3517                                         enum hnae3_reset_type rst_type)
3518 {
3519         struct hclge_dev *hdev = ae_dev->priv;
3520
3521         set_bit(rst_type, &hdev->default_reset_request);
3522 }
3523
3524 static void hclge_reset_timer(struct timer_list *t)
3525 {
3526         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3527
3528         dev_info(&hdev->pdev->dev,
3529                  "triggering reset in reset timer\n");
3530         hclge_reset_event(hdev->pdev, NULL);
3531 }
3532
3533 static void hclge_reset_subtask(struct hclge_dev *hdev)
3534 {
3535         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3536
3537         /* check if there is any ongoing reset in the hardware. This status
3538          * can be checked from reset_pending. If there is, we need to wait
3539          * for the hardware to complete the reset:
3540          *    a. If we are able to figure out in reasonable time that the
3541          *       hardware has fully reset, we can proceed with the driver
3542          *       and client reset.
3543          *    b. else, we come back later to check this status, so
3544          *       re-schedule now.
3545          */
3546         hdev->last_reset_time = jiffies;
3547         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3548         if (hdev->reset_type != HNAE3_NONE_RESET)
3549                 hclge_reset(hdev);
3550
3551         /* check if we got any *new* reset requests to be honored */
3552         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3553         if (hdev->reset_type != HNAE3_NONE_RESET)
3554                 hclge_do_reset(hdev);
3555
3556         hdev->reset_type = HNAE3_NONE_RESET;
3557 }
3558
3559 static void hclge_reset_service_task(struct work_struct *work)
3560 {
3561         struct hclge_dev *hdev =
3562                 container_of(work, struct hclge_dev, rst_service_task);
3563
3564         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3565                 return;
3566
3567         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3568
3569         hclge_reset_subtask(hdev);
3570
3571         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3572 }
3573
3574 static void hclge_mailbox_service_task(struct work_struct *work)
3575 {
3576         struct hclge_dev *hdev =
3577                 container_of(work, struct hclge_dev, mbx_service_task);
3578
3579         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3580                 return;
3581
3582         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3583
3584         hclge_mbx_handler(hdev);
3585
3586         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3587 }
3588
3589 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3590 {
3591         int i;
3592
3593         /* start from vport 1, since vport 0 (the PF) is always alive */
3594         for (i = 1; i < hdev->num_alloc_vport; i++) {
3595                 struct hclge_vport *vport = &hdev->vport[i];
3596
3597                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3598                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3599
3600                 /* If the VF is not alive, reset its MPS to the default value */
3601                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3602                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3603         }
3604 }
3605
3606 static void hclge_service_task(struct work_struct *work)
3607 {
3608         struct hclge_dev *hdev =
3609                 container_of(work, struct hclge_dev, service_task);
3610
3611         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3612                 hclge_update_stats_for_all(hdev);
3613                 hdev->hw_stats.stats_timer = 0;
3614         }
3615
3616         hclge_update_port_info(hdev);
3617         hclge_update_link_status(hdev);
3618         hclge_update_vport_alive(hdev);
3619         hclge_sync_vlan_filter(hdev);
3620         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3621                 hclge_rfs_filter_expire(hdev);
3622                 hdev->fd_arfs_expire_timer = 0;
3623         }
3624         hclge_service_complete(hdev);
3625 }
3626
3627 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3628 {
3629         /* VF handle has no client */
3630         if (!handle->client)
3631                 return container_of(handle, struct hclge_vport, nic);
3632         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3633                 return container_of(handle, struct hclge_vport, roce);
3634         else
3635                 return container_of(handle, struct hclge_vport, nic);
3636 }
3637
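/* Allocate up to @vector_num MSI-X vectors for the requesting vport,
 * skipping vector 0 (reserved for misc interrupts). Fills @vector_info
 * with the irq number and the per-vport io address of each vector and
 * returns how many vectors were actually allocated.
 */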
3638 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3639                             struct hnae3_vector_info *vector_info)
3640 {
3641         struct hclge_vport *vport = hclge_get_vport(handle);
3642         struct hnae3_vector_info *vector = vector_info;
3643         struct hclge_dev *hdev = vport->back;
3644         int alloc = 0;
3645         int i, j;
3646
3647         vector_num = min(hdev->num_msi_left, vector_num);
3648
3649         for (j = 0; j < vector_num; j++) {
3650                 for (i = 1; i < hdev->num_msi; i++) {
3651                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3652                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3653                                 vector->io_addr = hdev->hw.io_base +
3654                                         HCLGE_VECTOR_REG_BASE +
3655                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3656                                         vport->vport_id *
3657                                         HCLGE_VECTOR_VF_OFFSET;
3658                                 hdev->vector_status[i] = vport->vport_id;
3659                                 hdev->vector_irq[i] = vector->vector;
3660
3661                                 vector++;
3662                                 alloc++;
3663
3664                                 break;
3665                         }
3666                 }
3667         }
3668         hdev->num_msi_left -= alloc;
3669         hdev->num_msi_used += alloc;
3670
3671         return alloc;
3672 }
3673
3674 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3675 {
3676         int i;
3677
3678         for (i = 0; i < hdev->num_msi; i++)
3679                 if (vector == hdev->vector_irq[i])
3680                         return i;
3681
3682         return -EINVAL;
3683 }
3684
3685 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3686 {
3687         struct hclge_vport *vport = hclge_get_vport(handle);
3688         struct hclge_dev *hdev = vport->back;
3689         int vector_id;
3690
3691         vector_id = hclge_get_vector_index(hdev, vector);
3692         if (vector_id < 0) {
3693                 dev_err(&hdev->pdev->dev,
3694                         "Get vector index fail. vector_id = %d\n", vector_id);
3695                 return vector_id;
3696         }
3697
3698         hclge_free_vector(hdev, vector_id);
3699
3700         return 0;
3701 }
3702
3703 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3704 {
3705         return HCLGE_RSS_KEY_SIZE;
3706 }
3707
3708 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3709 {
3710         return HCLGE_RSS_IND_TBL_SIZE;
3711 }
3712
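/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk, with the chunk index carried in the hash_config field.
 */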
3713 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3714                                   const u8 hfunc, const u8 *key)
3715 {
3716         struct hclge_rss_config_cmd *req;
3717         unsigned int key_offset = 0;
3718         struct hclge_desc desc;
3719         int key_counts;
3720         int key_size;
3721         int ret;
3722
3723         key_counts = HCLGE_RSS_KEY_SIZE;
3724         req = (struct hclge_rss_config_cmd *)desc.data;
3725
3726         while (key_counts) {
3727                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3728                                            false);
3729
3730                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3731                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3732
3733                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3734                 memcpy(req->hash_key,
3735                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3736
3737                 key_counts -= key_size;
3738                 key_offset++;
3739                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3740                 if (ret) {
3741                         dev_err(&hdev->pdev->dev,
3742                                 "Configure RSS algo key fail, status = %d\n",
3743                                 ret);
3744                         return ret;
3745                 }
3746         }
3747         return 0;
3748 }
3749
3750 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3751 {
3752         struct hclge_rss_indirection_table_cmd *req;
3753         struct hclge_desc desc;
3754         int i, j;
3755         int ret;
3756
3757         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3758
3759         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3760                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3761                                            false);
3762
3763                 req->start_table_index =
3764                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3765                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3766
3767                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3768                         req->rss_result[j] =
3769                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3770
3771                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3772                 if (ret) {
3773                         dev_err(&hdev->pdev->dev,
3774                                 "Configure rss indir table fail, status = %d\n",
3775                                 ret);
3776                         return ret;
3777                 }
3778         }
3779         return 0;
3780 }
3781
3782 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3783                                  u16 *tc_size, u16 *tc_offset)
3784 {
3785         struct hclge_rss_tc_mode_cmd *req;
3786         struct hclge_desc desc;
3787         int ret;
3788         int i;
3789
3790         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3791         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3792
3793         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3794                 u16 mode = 0;
3795
3796                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3797                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3798                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3799                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3800                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3801
3802                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3803         }
3804
3805         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3806         if (ret)
3807                 dev_err(&hdev->pdev->dev,
3808                         "Configure rss tc mode fail, status = %d\n", ret);
3809
3810         return ret;
3811 }
3812
3813 static void hclge_get_rss_type(struct hclge_vport *vport)
3814 {
3815         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3816             vport->rss_tuple_sets.ipv4_udp_en ||
3817             vport->rss_tuple_sets.ipv4_sctp_en ||
3818             vport->rss_tuple_sets.ipv6_tcp_en ||
3819             vport->rss_tuple_sets.ipv6_udp_en ||
3820             vport->rss_tuple_sets.ipv6_sctp_en)
3821                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3822         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3823                  vport->rss_tuple_sets.ipv6_fragment_en)
3824                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3825         else
3826                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3827 }
3828
3829 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3830 {
3831         struct hclge_rss_input_tuple_cmd *req;
3832         struct hclge_desc desc;
3833         int ret;
3834
3835         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3836
3837         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3838
3839         /* Get the tuple cfg from the PF */
3840         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3841         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3842         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3843         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3844         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3845         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3846         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3847         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3848         hclge_get_rss_type(&hdev->vport[0]);
3849         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3850         if (ret)
3851                 dev_err(&hdev->pdev->dev,
3852                         "Configure rss input fail, status = %d\n", ret);
3853         return ret;
3854 }
3855
3856 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3857                          u8 *key, u8 *hfunc)
3858 {
3859         struct hclge_vport *vport = hclge_get_vport(handle);
3860         int i;
3861
3862         /* Get hash algorithm */
3863         if (hfunc) {
3864                 switch (vport->rss_algo) {
3865                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3866                         *hfunc = ETH_RSS_HASH_TOP;
3867                         break;
3868                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3869                         *hfunc = ETH_RSS_HASH_XOR;
3870                         break;
3871                 default:
3872                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3873                         break;
3874                 }
3875         }
3876
3877         /* Get the RSS key requested by the user */
3878         if (key)
3879                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3880
3881         /* Get the indirection table */
3882         if (indir)
3883                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3884                         indir[i] = vport->rss_indirection_tbl[i];
3885
3886         return 0;
3887 }
3888
3889 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3890                          const  u8 *key, const  u8 hfunc)
3891 {
3892         struct hclge_vport *vport = hclge_get_vport(handle);
3893         struct hclge_dev *hdev = vport->back;
3894         u8 hash_algo;
3895         int ret, i;
3896
3897         /* Set the RSS hash key if specified by the user */
3898         if (key) {
3899                 switch (hfunc) {
3900                 case ETH_RSS_HASH_TOP:
3901                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3902                         break;
3903                 case ETH_RSS_HASH_XOR:
3904                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3905                         break;
3906                 case ETH_RSS_HASH_NO_CHANGE:
3907                         hash_algo = vport->rss_algo;
3908                         break;
3909                 default:
3910                         return -EINVAL;
3911                 }
3912
3913                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3914                 if (ret)
3915                         return ret;
3916
3917                 /* Update the shadow RSS key with the user specified key */
3918                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3919                 vport->rss_algo = hash_algo;
3920         }
3921
3922         /* Update the shadow RSS table with user specified qids */
3923         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3924                 vport->rss_indirection_tbl[i] = indir[i];
3925
3926         /* Update the hardware */
3927         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3928 }
3929
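/* Translate the ethtool RXH_* flags in @nfc into the hardware tuple bits
 * (source/destination IP and L4 port); SCTP flows additionally set
 * HCLGE_V_TAG_BIT.
 */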
3930 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3931 {
3932         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3933
3934         if (nfc->data & RXH_L4_B_2_3)
3935                 hash_sets |= HCLGE_D_PORT_BIT;
3936         else
3937                 hash_sets &= ~HCLGE_D_PORT_BIT;
3938
3939         if (nfc->data & RXH_IP_SRC)
3940                 hash_sets |= HCLGE_S_IP_BIT;
3941         else
3942                 hash_sets &= ~HCLGE_S_IP_BIT;
3943
3944         if (nfc->data & RXH_IP_DST)
3945                 hash_sets |= HCLGE_D_IP_BIT;
3946         else
3947                 hash_sets &= ~HCLGE_D_IP_BIT;
3948
3949         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3950                 hash_sets |= HCLGE_V_TAG_BIT;
3951
3952         return hash_sets;
3953 }
3954
3955 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3956                                struct ethtool_rxnfc *nfc)
3957 {
3958         struct hclge_vport *vport = hclge_get_vport(handle);
3959         struct hclge_dev *hdev = vport->back;
3960         struct hclge_rss_input_tuple_cmd *req;
3961         struct hclge_desc desc;
3962         u8 tuple_sets;
3963         int ret;
3964
3965         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3966                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3967                 return -EINVAL;
3968
3969         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3970         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3971
3972         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3973         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3974         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3975         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3976         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3977         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3978         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3979         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3980
3981         tuple_sets = hclge_get_rss_hash_bits(nfc);
3982         switch (nfc->flow_type) {
3983         case TCP_V4_FLOW:
3984                 req->ipv4_tcp_en = tuple_sets;
3985                 break;
3986         case TCP_V6_FLOW:
3987                 req->ipv6_tcp_en = tuple_sets;
3988                 break;
3989         case UDP_V4_FLOW:
3990                 req->ipv4_udp_en = tuple_sets;
3991                 break;
3992         case UDP_V6_FLOW:
3993                 req->ipv6_udp_en = tuple_sets;
3994                 break;
3995         case SCTP_V4_FLOW:
3996                 req->ipv4_sctp_en = tuple_sets;
3997                 break;
3998         case SCTP_V6_FLOW:
3999                 if ((nfc->data & RXH_L4_B_0_1) ||
4000                     (nfc->data & RXH_L4_B_2_3))
4001                         return -EINVAL;
4002
4003                 req->ipv6_sctp_en = tuple_sets;
4004                 break;
4005         case IPV4_FLOW:
4006                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4007                 break;
4008         case IPV6_FLOW:
4009                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4010                 break;
4011         default:
4012                 return -EINVAL;
4013         }
4014
4015         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4016         if (ret) {
4017                 dev_err(&hdev->pdev->dev,
4018                         "Set rss tuple fail, status = %d\n", ret);
4019                 return ret;
4020         }
4021
4022         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4023         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4024         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4025         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4026         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4027         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4028         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4029         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4030         hclge_get_rss_type(vport);
4031         return 0;
4032 }
4033
4034 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4035                                struct ethtool_rxnfc *nfc)
4036 {
4037         struct hclge_vport *vport = hclge_get_vport(handle);
4038         u8 tuple_sets;
4039
4040         nfc->data = 0;
4041
4042         switch (nfc->flow_type) {
4043         case TCP_V4_FLOW:
4044                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4045                 break;
4046         case UDP_V4_FLOW:
4047                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4048                 break;
4049         case TCP_V6_FLOW:
4050                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4051                 break;
4052         case UDP_V6_FLOW:
4053                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4054                 break;
4055         case SCTP_V4_FLOW:
4056                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4057                 break;
4058         case SCTP_V6_FLOW:
4059                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4060                 break;
4061         case IPV4_FLOW:
4062         case IPV6_FLOW:
4063                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4064                 break;
4065         default:
4066                 return -EINVAL;
4067         }
4068
4069         if (!tuple_sets)
4070                 return 0;
4071
4072         if (tuple_sets & HCLGE_D_PORT_BIT)
4073                 nfc->data |= RXH_L4_B_2_3;
4074         if (tuple_sets & HCLGE_S_PORT_BIT)
4075                 nfc->data |= RXH_L4_B_0_1;
4076         if (tuple_sets & HCLGE_D_IP_BIT)
4077                 nfc->data |= RXH_IP_DST;
4078         if (tuple_sets & HCLGE_S_IP_BIT)
4079                 nfc->data |= RXH_IP_SRC;
4080
4081         return 0;
4082 }
4083
4084 static int hclge_get_tc_size(struct hnae3_handle *handle)
4085 {
4086         struct hclge_vport *vport = hclge_get_vport(handle);
4087         struct hclge_dev *hdev = vport->back;
4088
4089         return hdev->rss_size_max;
4090 }
4091
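/* Write the current software RSS state (indirection table, hash key,
 * input tuples and per-TC queue mapping) into hardware. tc_size is
 * programmed as the log2 of rss_size rounded up to a power of two.
 */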
4092 int hclge_rss_init_hw(struct hclge_dev *hdev)
4093 {
4094         struct hclge_vport *vport = hdev->vport;
4095         u8 *rss_indir = vport[0].rss_indirection_tbl;
4096         u16 rss_size = vport[0].alloc_rss_size;
4097         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4098         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4099         u8 *key = vport[0].rss_hash_key;
4100         u8 hfunc = vport[0].rss_algo;
4101         u16 tc_valid[HCLGE_MAX_TC_NUM];
4102         u16 roundup_size;
4103         unsigned int i;
4104         int ret;
4105
4106         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4107         if (ret)
4108                 return ret;
4109
4110         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4111         if (ret)
4112                 return ret;
4113
4114         ret = hclge_set_rss_input_tuple(hdev);
4115         if (ret)
4116                 return ret;
4117
4118         /* Each TC has the same queue size, and the tc_size set to hardware
4119          * is the log2 of the roundup power of two of rss_size; the actual
4120          * queue size is limited by the indirection table.
4121          */
4122         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4123                 dev_err(&hdev->pdev->dev,
4124                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4125                         rss_size);
4126                 return -EINVAL;
4127         }
4128
4129         roundup_size = roundup_pow_of_two(rss_size);
4130         roundup_size = ilog2(roundup_size);
4131
4132         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4133                 tc_valid[i] = 0;
4134
4135                 if (!(hdev->hw_tc_map & BIT(i)))
4136                         continue;
4137
4138                 tc_valid[i] = 1;
4139                 tc_size[i] = roundup_size;
4140                 tc_offset[i] = rss_size * i;
4141         }
4142
4143         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4144 }
4145
4146 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4147 {
4148         struct hclge_vport *vport = hdev->vport;
4149         int i, j;
4150
4151         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4152                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4153                         vport[j].rss_indirection_tbl[i] =
4154                                 i % vport[j].alloc_rss_size;
4155         }
4156 }
4157
4158 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4159 {
4160         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4161         struct hclge_vport *vport = hdev->vport;
4162
4163         if (hdev->pdev->revision >= 0x21)
4164                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4165
4166         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4167                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4168                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4169                 vport[i].rss_tuple_sets.ipv4_udp_en =
4170                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4171                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4172                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4173                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4174                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4175                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4176                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4177                 vport[i].rss_tuple_sets.ipv6_udp_en =
4178                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4179                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4180                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4181                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4182                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4183
4184                 vport[i].rss_algo = rss_algo;
4185
4186                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4187                        HCLGE_RSS_KEY_SIZE);
4188         }
4189
4190         hclge_rss_indir_init_cfg(hdev);
4191 }
4192
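/* Map (@en = true) or unmap the rings in @ring_chain to interrupt vector
 * @vector_id, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries
 * into each command descriptor sent to firmware.
 */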
4193 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4194                                 int vector_id, bool en,
4195                                 struct hnae3_ring_chain_node *ring_chain)
4196 {
4197         struct hclge_dev *hdev = vport->back;
4198         struct hnae3_ring_chain_node *node;
4199         struct hclge_desc desc;
4200         struct hclge_ctrl_vector_chain_cmd *req
4201                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4202         enum hclge_cmd_status status;
4203         enum hclge_opcode_type op;
4204         u16 tqp_type_and_id;
4205         int i;
4206
4207         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4208         hclge_cmd_setup_basic_desc(&desc, op, false);
4209         req->int_vector_id = vector_id;
4210
4211         i = 0;
4212         for (node = ring_chain; node; node = node->next) {
4213                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4214                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4215                                 HCLGE_INT_TYPE_S,
4216                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4217                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4218                                 HCLGE_TQP_ID_S, node->tqp_index);
4219                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4220                                 HCLGE_INT_GL_IDX_S,
4221                                 hnae3_get_field(node->int_gl_idx,
4222                                                 HNAE3_RING_GL_IDX_M,
4223                                                 HNAE3_RING_GL_IDX_S));
4224                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4225                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4226                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4227                         req->vfid = vport->vport_id;
4228
4229                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4230                         if (status) {
4231                                 dev_err(&hdev->pdev->dev,
4232                                         "Map TQP fail, status is %d.\n",
4233                                         status);
4234                                 return -EIO;
4235                         }
4236                         i = 0;
4237
4238                         hclge_cmd_setup_basic_desc(&desc,
4239                                                    op,
4240                                                    false);
4241                         req->int_vector_id = vector_id;
4242                 }
4243         }
4244
4245         if (i > 0) {
4246                 req->int_cause_num = i;
4247                 req->vfid = vport->vport_id;
4248                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4249                 if (status) {
4250                         dev_err(&hdev->pdev->dev,
4251                                 "Map TQP fail, status is %d.\n", status);
4252                         return -EIO;
4253                 }
4254         }
4255
4256         return 0;
4257 }
4258
4259 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4260                                     struct hnae3_ring_chain_node *ring_chain)
4261 {
4262         struct hclge_vport *vport = hclge_get_vport(handle);
4263         struct hclge_dev *hdev = vport->back;
4264         int vector_id;
4265
4266         vector_id = hclge_get_vector_index(hdev, vector);
4267         if (vector_id < 0) {
4268                 dev_err(&hdev->pdev->dev,
4269                         "Get vector index fail. vector_id = %d\n", vector_id);
4270                 return vector_id;
4271         }
4272
4273         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4274 }
4275
4276 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4277                                        struct hnae3_ring_chain_node *ring_chain)
4278 {
4279         struct hclge_vport *vport = hclge_get_vport(handle);
4280         struct hclge_dev *hdev = vport->back;
4281         int vector_id, ret;
4282
4283         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4284                 return 0;
4285
4286         vector_id = hclge_get_vector_index(hdev, vector);
4287         if (vector_id < 0) {
4288                 dev_err(&handle->pdev->dev,
4289                         "Get vector index fail. ret = %d\n", vector_id);
4290                 return vector_id;
4291         }
4292
4293         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4294         if (ret)
4295                 dev_err(&handle->pdev->dev,
4296                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4297                         vector_id, ret);
4298
4299         return ret;
4300 }
4301
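/* Send the promiscuous mode configuration for the vport described by
 * @param (vf_id plus the unicast/multicast/broadcast enable bits).
 */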
4302 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4303                                struct hclge_promisc_param *param)
4304 {
4305         struct hclge_promisc_cfg_cmd *req;
4306         struct hclge_desc desc;
4307         int ret;
4308
4309         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4310
4311         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4312         req->vf_id = param->vf_id;
4313
4314         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4315          * on pdev revision(0x20); newer revisions support them. Setting
4316          * these two fields does not return an error when the driver sends
4317          * the command to the firmware on revision(0x20).
4318          */
4319         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4320                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4321
4322         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4323         if (ret)
4324                 dev_err(&hdev->pdev->dev,
4325                         "Set promisc mode fail, status is %d.\n", ret);
4326
4327         return ret;
4328 }
4329
4330 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4331                               bool en_mc, bool en_bc, int vport_id)
4332 {
4333         if (!param)
4334                 return;
4335
4336         memset(param, 0, sizeof(struct hclge_promisc_param));
4337         if (en_uc)
4338                 param->enable = HCLGE_PROMISC_EN_UC;
4339         if (en_mc)
4340                 param->enable |= HCLGE_PROMISC_EN_MC;
4341         if (en_bc)
4342                 param->enable |= HCLGE_PROMISC_EN_BC;
4343         param->vf_id = vport_id;
4344 }
4345
4346 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4347                                   bool en_mc_pmc)
4348 {
4349         struct hclge_vport *vport = hclge_get_vport(handle);
4350         struct hclge_dev *hdev = vport->back;
4351         struct hclge_promisc_param param;
4352         bool en_bc_pmc = true;
4353
4354         /* For revision 0x20, if broadcast promisc is enabled, the vlan
4355          * filter is always bypassed. So broadcast promisc should be
4356          * disabled until the user enables promisc mode.
4357          */
4358         if (handle->pdev->revision == 0x20)
4359                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4360
4361         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4362                                  vport->vport_id);
4363         return hclge_cmd_set_promisc_mode(hdev, &param);
4364 }
4365
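/* Query the flow director mode from firmware; the mode determines the
 * TCAM key width used by the rest of the FD setup.
 */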
4366 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4367 {
4368         struct hclge_get_fd_mode_cmd *req;
4369         struct hclge_desc desc;
4370         int ret;
4371
4372         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4373
4374         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4375
4376         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4377         if (ret) {
4378                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4379                 return ret;
4380         }
4381
4382         *fd_mode = req->mode;
4383
4384         return ret;
4385 }
4386
4387 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4388                                    u32 *stage1_entry_num,
4389                                    u32 *stage2_entry_num,
4390                                    u16 *stage1_counter_num,
4391                                    u16 *stage2_counter_num)
4392 {
4393         struct hclge_get_fd_allocation_cmd *req;
4394         struct hclge_desc desc;
4395         int ret;
4396
4397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4398
4399         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4400
4401         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4402         if (ret) {
4403                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4404                         ret);
4405                 return ret;
4406         }
4407
4408         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4409         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4410         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4411         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4412
4413         return ret;
4414 }
4415
4416 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4417 {
4418         struct hclge_set_fd_key_config_cmd *req;
4419         struct hclge_fd_key_cfg *stage;
4420         struct hclge_desc desc;
4421         int ret;
4422
4423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4424
4425         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4426         stage = &hdev->fd_cfg.key_cfg[stage_num];
4427         req->stage = stage_num;
4428         req->key_select = stage->key_sel;
4429         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4430         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4431         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4432         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4433         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4434         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4435
4436         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4437         if (ret)
4438                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4439
4440         return ret;
4441 }
4442
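/* Initialize the stage 1 flow director configuration: derive the max key
 * length from the FD mode, declare the supported flow types, select the
 * tuple and meta data fields that form the TCAM key, query the rule and
 * counter allocation, then write the key configuration to firmware.
 */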
4443 static int hclge_init_fd_config(struct hclge_dev *hdev)
4444 {
4445 #define LOW_2_WORDS             0x03
4446         struct hclge_fd_key_cfg *key_cfg;
4447         int ret;
4448
4449         if (!hnae3_dev_fd_supported(hdev))
4450                 return 0;
4451
4452         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4453         if (ret)
4454                 return ret;
4455
4456         switch (hdev->fd_cfg.fd_mode) {
4457         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4458                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4459                 break;
4460         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4461                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4462                 break;
4463         default:
4464                 dev_err(&hdev->pdev->dev,
4465                         "Unsupported flow director mode %d\n",
4466                         hdev->fd_cfg.fd_mode);
4467                 return -EOPNOTSUPP;
4468         }
4469
4470         hdev->fd_cfg.proto_support =
4471                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4472                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4473         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4474         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4475         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4476         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4477         key_cfg->outer_sipv6_word_en = 0;
4478         key_cfg->outer_dipv6_word_en = 0;
4479
4480         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4481                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4482                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4483                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4484
4485         /* If we use the max 400 bit key, we can support tuples for ether type */
4486         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4487                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4488                 key_cfg->tuple_active |=
4489                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4490         }
4491
4492         /* roce_type is used to filter roce frames,
4493          * dst_vport is used to specify the rule.
4494          */
4495         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4496
4497         ret = hclge_get_fd_allocation(hdev,
4498                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4499                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4500                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4501                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4502         if (ret)
4503                 return ret;
4504
4505         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4506 }
4507
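/* Write one half of a TCAM entry (x or y, chosen by @sel_x) at index
 * @loc. The key is wider than a single command descriptor, so it is
 * split across three chained descriptors; passing a NULL @key with
 * @is_add false invalidates the entry instead.
 */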
4508 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4509                                 int loc, u8 *key, bool is_add)
4510 {
4511         struct hclge_fd_tcam_config_1_cmd *req1;
4512         struct hclge_fd_tcam_config_2_cmd *req2;
4513         struct hclge_fd_tcam_config_3_cmd *req3;
4514         struct hclge_desc desc[3];
4515         int ret;
4516
4517         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4518         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4519         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4520         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4521         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4522
4523         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4524         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4525         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4526
4527         req1->stage = stage;
4528         req1->xy_sel = sel_x ? 1 : 0;
4529         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4530         req1->index = cpu_to_le32(loc);
4531         req1->entry_vld = sel_x ? is_add : 0;
4532
4533         if (key) {
4534                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4535                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4536                        sizeof(req2->tcam_data));
4537                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4538                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4539         }
4540
4541         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4542         if (ret)
4543                 dev_err(&hdev->pdev->dev,
4544                         "config tcam key fail, ret=%d\n",
4545                         ret);
4546
4547         return ret;
4548 }
4549
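/* Write the action data for the rule at index @loc: drop or forward to
 * a queue, optional counter, and the rule id written back to the BD.
 */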
4550 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4551                               struct hclge_fd_ad_data *action)
4552 {
4553         struct hclge_fd_ad_config_cmd *req;
4554         struct hclge_desc desc;
4555         u64 ad_data = 0;
4556         int ret;
4557
4558         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4559
4560         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4561         req->index = cpu_to_le32(loc);
4562         req->stage = stage;
4563
4564         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4565                       action->write_rule_id_to_bd);
4566         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4567                         action->rule_id);
4568         ad_data <<= 32;
4569         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4570         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4571                       action->forward_to_direct_queue);
4572         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4573                         action->queue_id);
4574         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4575         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4576                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4577         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4578         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4579                         action->next_input_key);
4580
4581         req->ad_data = cpu_to_le64(ad_data);
4582         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4583         if (ret)
4584                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4585
4586         return ret;
4587 }
4588
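/* Convert one tuple of @rule into its TCAM x/y encoding using the
 * calc_x()/calc_y() helpers, which combine the tuple value with its
 * mask. Returns true when the tuple occupies space in the key (also
 * for tuples the rule leaves unused, where the pre-zeroed key bytes
 * stand), so the caller knows to advance its key cursor.
 */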
4589 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4590                                    struct hclge_fd_rule *rule)
4591 {
4592         u16 tmp_x_s, tmp_y_s;
4593         u32 tmp_x_l, tmp_y_l;
4594         int i;
4595
4596         if (rule->unused_tuple & tuple_bit)
4597                 return true;
4598
4599         switch (tuple_bit) {
4600         case 0:
4601                 return false;
4602         case BIT(INNER_DST_MAC):
4603                 for (i = 0; i < ETH_ALEN; i++) {
4604                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4605                                rule->tuples_mask.dst_mac[i]);
4606                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4607                                rule->tuples_mask.dst_mac[i]);
4608                 }
4609
4610                 return true;
4611         case BIT(INNER_SRC_MAC):
4612                 for (i = 0; i < ETH_ALEN; i++) {
4613                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4614                                rule->tuples_mask.src_mac[i]);
4615                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4616                                rule->tuples_mask.src_mac[i]);
4617                 }
4618
4619                 return true;
4620         case BIT(INNER_VLAN_TAG_FST):
4621                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4622                        rule->tuples_mask.vlan_tag1);
4623                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4624                        rule->tuples_mask.vlan_tag1);
4625                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4626                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4627
4628                 return true;
4629         case BIT(INNER_ETH_TYPE):
4630                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4631                        rule->tuples_mask.ether_proto);
4632                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4633                        rule->tuples_mask.ether_proto);
4634                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4635                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4636
4637                 return true;
4638         case BIT(INNER_IP_TOS):
4639                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4640                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4641
4642                 return true;
4643         case BIT(INNER_IP_PROTO):
4644                 calc_x(*key_x, rule->tuples.ip_proto,
4645                        rule->tuples_mask.ip_proto);
4646                 calc_y(*key_y, rule->tuples.ip_proto,
4647                        rule->tuples_mask.ip_proto);
4648
4649                 return true;
4650         case BIT(INNER_SRC_IP):
4651                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4652                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4653                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4654                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4655                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4656                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4657
4658                 return true;
4659         case BIT(INNER_DST_IP):
4660                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4661                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4662                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4663                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4664                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4665                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4666
4667                 return true;
4668         case BIT(INNER_SRC_PORT):
4669                 calc_x(tmp_x_s, rule->tuples.src_port,
4670                        rule->tuples_mask.src_port);
4671                 calc_y(tmp_y_s, rule->tuples.src_port,
4672                        rule->tuples_mask.src_port);
4673                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4674                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4675
4676                 return true;
4677         case BIT(INNER_DST_PORT):
4678                 calc_x(tmp_x_s, rule->tuples.dst_port,
4679                        rule->tuples_mask.dst_port);
4680                 calc_y(tmp_y_s, rule->tuples.dst_port,
4681                        rule->tuples_mask.dst_port);
4682                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4683                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4684
4685                 return true;
4686         default:
4687                 return false;
4688         }
4689 }
4690
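/* Encode a port number for the meta data key: a host port is identified
 * by its pf_id/vf_id pair, a network port by its network_port_id.
 */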
4691 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4692                                  u8 vf_id, u8 network_port_id)
4693 {
4694         u32 port_number = 0;
4695
4696         if (port_type == HOST_PORT) {
4697                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4698                                 pf_id);
4699                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4700                                 vf_id);
4701                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4702         } else {
4703                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4704                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4705                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4706         }
4707
4708         return port_number;
4709 }
4710
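/* Build the meta data part of the TCAM key (packet type and destination
 * vport) and store it exact-matched (all mask bits set) at the MSB end
 * of the key regions pointed to by @key_x/@key_y.
 */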
4711 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4712                                        __le32 *key_x, __le32 *key_y,
4713                                        struct hclge_fd_rule *rule)
4714 {
4715         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4716         u8 cur_pos = 0, tuple_size, shift_bits;
4717         unsigned int i;
4718
4719         for (i = 0; i < MAX_META_DATA; i++) {
4720                 tuple_size = meta_data_key_info[i].key_length;
4721                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4722
4723                 switch (tuple_bit) {
4724                 case BIT(ROCE_TYPE):
4725                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4726                         cur_pos += tuple_size;
4727                         break;
4728                 case BIT(DST_VPORT):
4729                         port_number = hclge_get_port_number(HOST_PORT, 0,
4730                                                             rule->vf_id, 0);
4731                         hnae3_set_field(meta_data,
4732                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4733                                         cur_pos, port_number);
4734                         cur_pos += tuple_size;
4735                         break;
4736                 default:
4737                         break;
4738                 }
4739         }
4740
4741         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4742         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4743         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4744
4745         *key_x = cpu_to_le32(tmp_x << shift_bits);
4746         *key_y = cpu_to_le32(tmp_y << shift_bits);
4747 }
4748
4749 /* A complete key is the combination of the meta data key and the tuple key.
4750  * The meta data key is stored in the MSB region, the tuple key is stored in
4751  * the LSB region, and unused bits are filled with 0.
4752  */
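/* Illustratively (the padding width is whatever the active tuples leave
 * between the two regions):
 *
 *   MSB                                              LSB
 *   +-----------------+---------------+--------------+
 *   |  meta data key  |   0 padding   |  tuple key   |
 *   +-----------------+---------------+--------------+
 */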
4753 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4754                             struct hclge_fd_rule *rule)
4755 {
4756         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4757         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4758         u8 *cur_key_x, *cur_key_y;
4759         unsigned int i;
4760         int ret, tuple_size;
4761         u8 meta_data_region;
4762
4763         memset(key_x, 0, sizeof(key_x));
4764         memset(key_y, 0, sizeof(key_y));
4765         cur_key_x = key_x;
4766         cur_key_y = key_y;
4767
4768         for (i = 0; i < MAX_TUPLE; i++) {
4769                 bool tuple_valid;
4770                 u32 check_tuple;
4771
4772                 tuple_size = tuple_key_info[i].key_length / 8;
4773                 check_tuple = key_cfg->tuple_active & BIT(i);
4774
4775                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4776                                                      cur_key_y, rule);
4777                 if (tuple_valid) {
4778                         cur_key_x += tuple_size;
4779                         cur_key_y += tuple_size;
4780                 }
4781         }
4782
4783         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4784                         MAX_META_DATA_LENGTH / 8;
4785
4786         hclge_fd_convert_meta_data(key_cfg,
4787                                    (__le32 *)(key_x + meta_data_region),
4788                                    (__le32 *)(key_y + meta_data_region),
4789                                    rule);
4790
4791         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4792                                    true);
4793         if (ret) {
4794                 dev_err(&hdev->pdev->dev,
4795                         "fd key_y config fail, loc=%d, ret=%d\n",
4796                         rule->location, ret);
4797                 return ret;
4798         }
4799
4800         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4801                                    true);
4802         if (ret)
4803                 dev_err(&hdev->pdev->dev,
4804                         "fd key_x config fail, loc=%d, ret=%d\n",
4805                         rule->location, ret);
4806         return ret;
4807 }
4808
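/* Translate the rule's action (drop, or forward to a direct queue) into
 * a hclge_fd_ad_data descriptor and write it to hardware.
 */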
4809 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4810                                struct hclge_fd_rule *rule)
4811 {
4812         struct hclge_fd_ad_data ad_data;
4813
4814         ad_data.ad_id = rule->location;
4815
4816         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4817                 ad_data.drop_packet = true;
4818                 ad_data.forward_to_direct_queue = false;
4819                 ad_data.queue_id = 0;
4820         } else {
4821                 ad_data.drop_packet = false;
4822                 ad_data.forward_to_direct_queue = true;
4823                 ad_data.queue_id = rule->queue_id;
4824         }
4825
4826         ad_data.use_counter = false;
4827         ad_data.counter_id = 0;
4828
4829         ad_data.use_next_stage = false;
4830         ad_data.next_input_key = 0;
4831
4832         ad_data.write_rule_id_to_bd = true;
4833         ad_data.rule_id = rule->location;
4834
4835         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4836 }
4837
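/* Validate an ethtool flow spec against the FD capabilities: check the
 * rule location, flow type and extension fields, and collect in @unused
 * the tuple bits the spec leaves unspecified.
 */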
4838 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4839                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4840 {
4841         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4842         struct ethtool_usrip4_spec *usr_ip4_spec;
4843         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4844         struct ethtool_usrip6_spec *usr_ip6_spec;
4845         struct ethhdr *ether_spec;
4846
4847         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4848                 return -EINVAL;
4849
4850         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4851                 return -EOPNOTSUPP;
4852
4853         if ((fs->flow_type & FLOW_EXT) &&
4854             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4855                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4856                 return -EOPNOTSUPP;
4857         }
4858
4859         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4860         case SCTP_V4_FLOW:
4861         case TCP_V4_FLOW:
4862         case UDP_V4_FLOW:
4863                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4864                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4865
4866                 if (!tcp_ip4_spec->ip4src)
4867                         *unused |= BIT(INNER_SRC_IP);
4868
4869                 if (!tcp_ip4_spec->ip4dst)
4870                         *unused |= BIT(INNER_DST_IP);
4871
4872                 if (!tcp_ip4_spec->psrc)
4873                         *unused |= BIT(INNER_SRC_PORT);
4874
4875                 if (!tcp_ip4_spec->pdst)
4876                         *unused |= BIT(INNER_DST_PORT);
4877
4878                 if (!tcp_ip4_spec->tos)
4879                         *unused |= BIT(INNER_IP_TOS);
4880
4881                 break;
4882         case IP_USER_FLOW:
4883                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4884                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4885                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4886
4887                 if (!usr_ip4_spec->ip4src)
4888                         *unused |= BIT(INNER_SRC_IP);
4889
4890                 if (!usr_ip4_spec->ip4dst)
4891                         *unused |= BIT(INNER_DST_IP);
4892
4893                 if (!usr_ip4_spec->tos)
4894                         *unused |= BIT(INNER_IP_TOS);
4895
4896                 if (!usr_ip4_spec->proto)
4897                         *unused |= BIT(INNER_IP_PROTO);
4898
4899                 if (usr_ip4_spec->l4_4_bytes)
4900                         return -EOPNOTSUPP;
4901
4902                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4903                         return -EOPNOTSUPP;
4904
4905                 break;
4906         case SCTP_V6_FLOW:
4907         case TCP_V6_FLOW:
4908         case UDP_V6_FLOW:
4909                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4910                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4911                         BIT(INNER_IP_TOS);
4912
4913                 /* check whether the src/dst ip address is used */
4914                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4915                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4916                         *unused |= BIT(INNER_SRC_IP);
4917
4918                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4919                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4920                         *unused |= BIT(INNER_DST_IP);
4921
4922                 if (!tcp_ip6_spec->psrc)
4923                         *unused |= BIT(INNER_SRC_PORT);
4924
4925                 if (!tcp_ip6_spec->pdst)
4926                         *unused |= BIT(INNER_DST_PORT);
4927
4928                 if (tcp_ip6_spec->tclass)
4929                         return -EOPNOTSUPP;
4930
4931                 break;
4932         case IPV6_USER_FLOW:
4933                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4934                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4935                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4936                         BIT(INNER_DST_PORT);
4937
4938                 /* check whether the src/dst ip address is used */
4939                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4940                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4941                         *unused |= BIT(INNER_SRC_IP);
4942
4943                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4944                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4945                         *unused |= BIT(INNER_DST_IP);
4946
4947                 if (!usr_ip6_spec->l4_proto)
4948                         *unused |= BIT(INNER_IP_PROTO);
4949
4950                 if (usr_ip6_spec->tclass)
4951                         return -EOPNOTSUPP;
4952
4953                 if (usr_ip6_spec->l4_4_bytes)
4954                         return -EOPNOTSUPP;
4955
4956                 break;
4957         case ETHER_FLOW:
4958                 ether_spec = &fs->h_u.ether_spec;
4959                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4960                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4961                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4962
4963                 if (is_zero_ether_addr(ether_spec->h_source))
4964                         *unused |= BIT(INNER_SRC_MAC);
4965
4966                 if (is_zero_ether_addr(ether_spec->h_dest))
4967                         *unused |= BIT(INNER_DST_MAC);
4968
4969                 if (!ether_spec->h_proto)
4970                         *unused |= BIT(INNER_ETH_TYPE);
4971
4972                 break;
4973         default:
4974                 return -EOPNOTSUPP;
4975         }
4976
4977         if ((fs->flow_type & FLOW_EXT)) {
4978                 if (fs->h_ext.vlan_etype)
4979                         return -EOPNOTSUPP;
4980                 if (!fs->h_ext.vlan_tci)
4981                         *unused |= BIT(INNER_VLAN_TAG_FST);
4982
4983                 if (fs->m_ext.vlan_tci) {
4984                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4985                                 return -EINVAL;
4986                 }
4987         } else {
4988                 *unused |= BIT(INNER_VLAN_TAG_FST);
4989         }
4990
4991         if (fs->flow_type & FLOW_MAC_EXT) {
4992                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4993                         return -EOPNOTSUPP;
4994
4995                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4996                         *unused |= BIT(INNER_DST_MAC);
4997                 else
4998                         *unused &= ~(BIT(INNER_DST_MAC));
4999         }
5000
5001         return 0;
5002 }
5003
5004 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5005 {
5006         struct hclge_fd_rule *rule = NULL;
5007         struct hlist_node *node2;
5008
5009         spin_lock_bh(&hdev->fd_rule_lock);
5010         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5011                 if (rule->location >= location)
5012                         break;
5013         }
5014
5015         spin_unlock_bh(&hdev->fd_rule_lock);
5016
5017         return rule && rule->location == location;
5018 }
5019
5020 /* the caller must hold fd_rule_lock */
5021 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5022                                      struct hclge_fd_rule *new_rule,
5023                                      u16 location,
5024                                      bool is_add)
5025 {
5026         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5027         struct hlist_node *node2;
5028
5029         if (is_add && !new_rule)
5030                 return -EINVAL;
5031
5032         hlist_for_each_entry_safe(rule, node2,
5033                                   &hdev->fd_rule_list, rule_node) {
5034                 if (rule->location >= location)
5035                         break;
5036                 parent = rule;
5037         }
5038
5039         if (rule && rule->location == location) {
5040                 hlist_del(&rule->rule_node);
5041                 kfree(rule);
5042                 hdev->hclge_fd_rule_num--;
5043
5044                 if (!is_add) {
5045                         if (!hdev->hclge_fd_rule_num)
5046                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5047                         clear_bit(location, hdev->fd_bmap);
5048
5049                         return 0;
5050                 }
5051         } else if (!is_add) {
5052                 dev_err(&hdev->pdev->dev,
5053                         "delete fail, rule %d does not exist\n",
5054                         location);
5055                 return -EINVAL;
5056         }
5057
5058         INIT_HLIST_NODE(&new_rule->rule_node);
5059
5060         if (parent)
5061                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5062         else
5063                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5064
5065         set_bit(location, hdev->fd_bmap);
5066         hdev->hclge_fd_rule_num++;
5067         hdev->fd_active_type = new_rule->rule_type;
5068
5069         return 0;
5070 }
5071
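/* Translate an ethtool flow spec into the driver's tuple/mask
 * representation, converting fields to CPU byte order and deriving the
 * implied ether_proto/ip_proto values from the flow type.
 */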
5072 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5073                               struct ethtool_rx_flow_spec *fs,
5074                               struct hclge_fd_rule *rule)
5075 {
5076         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5077
5078         switch (flow_type) {
5079         case SCTP_V4_FLOW:
5080         case TCP_V4_FLOW:
5081         case UDP_V4_FLOW:
5082                 rule->tuples.src_ip[IPV4_INDEX] =
5083                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5084                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5085                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5086
5087                 rule->tuples.dst_ip[IPV4_INDEX] =
5088                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5089                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5090                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5091
5092                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5093                 rule->tuples_mask.src_port =
5094                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5095
5096                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5097                 rule->tuples_mask.dst_port =
5098                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5099
5100                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5101                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5102
5103                 rule->tuples.ether_proto = ETH_P_IP;
5104                 rule->tuples_mask.ether_proto = 0xFFFF;
5105
5106                 break;
5107         case IP_USER_FLOW:
5108                 rule->tuples.src_ip[IPV4_INDEX] =
5109                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5110                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5111                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5112
5113                 rule->tuples.dst_ip[IPV4_INDEX] =
5114                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5115                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5116                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5117
5118                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5119                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5120
5121                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5122                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5123
5124                 rule->tuples.ether_proto = ETH_P_IP;
5125                 rule->tuples_mask.ether_proto = 0xFFFF;
5126
5127                 break;
5128         case SCTP_V6_FLOW:
5129         case TCP_V6_FLOW:
5130         case UDP_V6_FLOW:
5131                 be32_to_cpu_array(rule->tuples.src_ip,
5132                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5133                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5134                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5135
5136                 be32_to_cpu_array(rule->tuples.dst_ip,
5137                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5138                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5139                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5140
5141                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5142                 rule->tuples_mask.src_port =
5143                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5144
5145                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5146                 rule->tuples_mask.dst_port =
5147                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5148
5149                 rule->tuples.ether_proto = ETH_P_IPV6;
5150                 rule->tuples_mask.ether_proto = 0xFFFF;
5151
5152                 break;
5153         case IPV6_USER_FLOW:
5154                 be32_to_cpu_array(rule->tuples.src_ip,
5155                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5156                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5157                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5158
5159                 be32_to_cpu_array(rule->tuples.dst_ip,
5160                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5161                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5162                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5163
5164                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5165                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5166
5167                 rule->tuples.ether_proto = ETH_P_IPV6;
5168                 rule->tuples_mask.ether_proto = 0xFFFF;
5169
5170                 break;
5171         case ETHER_FLOW:
5172                 ether_addr_copy(rule->tuples.src_mac,
5173                                 fs->h_u.ether_spec.h_source);
5174                 ether_addr_copy(rule->tuples_mask.src_mac,
5175                                 fs->m_u.ether_spec.h_source);
5176
5177                 ether_addr_copy(rule->tuples.dst_mac,
5178                                 fs->h_u.ether_spec.h_dest);
5179                 ether_addr_copy(rule->tuples_mask.dst_mac,
5180                                 fs->m_u.ether_spec.h_dest);
5181
5182                 rule->tuples.ether_proto =
5183                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5184                 rule->tuples_mask.ether_proto =
5185                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5186
5187                 break;
5188         default:
5189                 return -EOPNOTSUPP;
5190         }
5191
5192         switch (flow_type) {
5193         case SCTP_V4_FLOW:
5194         case SCTP_V6_FLOW:
5195                 rule->tuples.ip_proto = IPPROTO_SCTP;
5196                 rule->tuples_mask.ip_proto = 0xFF;
5197                 break;
5198         case TCP_V4_FLOW:
5199         case TCP_V6_FLOW:
5200                 rule->tuples.ip_proto = IPPROTO_TCP;
5201                 rule->tuples_mask.ip_proto = 0xFF;
5202                 break;
5203         case UDP_V4_FLOW:
5204         case UDP_V6_FLOW:
5205                 rule->tuples.ip_proto = IPPROTO_UDP;
5206                 rule->tuples_mask.ip_proto = 0xFF;
5207                 break;
5208         default:
5209                 break;
5210         }
5211
5212         if ((fs->flow_type & FLOW_EXT)) {
5213                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5214                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5215         }
5216
5217         if (fs->flow_type & FLOW_MAC_EXT) {
5218                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5219                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5220         }
5221
5222         return 0;
5223 }
5224
5225 /* the caller must hold fd_rule_lock */
5226 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5227                                 struct hclge_fd_rule *rule)
5228 {
5229         int ret;
5230
5231         if (!rule) {
5232                 dev_err(&hdev->pdev->dev,
5233                         "The flow director rule is NULL\n");
5234                 return -EINVAL;
5235         }
5236
5237         /* it never fails here, so there is no need to check the return value */
5238         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5239
5240         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5241         if (ret)
5242                 goto clear_rule;
5243
5244         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5245         if (ret)
5246                 goto clear_rule;
5247
5248         return 0;
5249
5250 clear_rule:
5251         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5252         return ret;
5253 }
5254
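/* ethtool ntuple add entry point: validate the flow spec, resolve the
 * destination vport and queue from the ring cookie, then build the rule
 * and program it into the FD TCAM under fd_rule_lock.
 */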
5255 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5256                               struct ethtool_rxnfc *cmd)
5257 {
5258         struct hclge_vport *vport = hclge_get_vport(handle);
5259         struct hclge_dev *hdev = vport->back;
5260         u16 dst_vport_id = 0, q_index = 0;
5261         struct ethtool_rx_flow_spec *fs;
5262         struct hclge_fd_rule *rule;
5263         u32 unused = 0;
5264         u8 action;
5265         int ret;
5266
5267         if (!hnae3_dev_fd_supported(hdev))
5268                 return -EOPNOTSUPP;
5269
5270         if (!hdev->fd_en) {
5271                 dev_warn(&hdev->pdev->dev,
5272                          "Please enable flow director first\n");
5273                 return -EOPNOTSUPP;
5274         }
5275
5276         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5277
5278         ret = hclge_fd_check_spec(hdev, fs, &unused);
5279         if (ret) {
5280                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5281                 return ret;
5282         }
5283
5284         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5285                 action = HCLGE_FD_ACTION_DROP_PACKET;
5286         } else {
5287                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5288                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5289                 u16 tqps;
5290
5291                 if (vf > hdev->num_req_vfs) {
5292                         dev_err(&hdev->pdev->dev,
5293                                 "Error: vf id (%d) > max vf num (%d)\n",
5294                                 vf, hdev->num_req_vfs);
5295                         return -EINVAL;
5296                 }
5297
5298                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5299                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5300
5301                 if (ring >= tqps) {
5302                         dev_err(&hdev->pdev->dev,
5303                                 "Error: queue id (%d) > max tqp num (%d)\n",
5304                                 ring, tqps - 1);
5305                         return -EINVAL;
5306                 }
5307
5308                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5309                 q_index = ring;
5310         }
5311
5312         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5313         if (!rule)
5314                 return -ENOMEM;
5315
5316         ret = hclge_fd_get_tuple(hdev, fs, rule);
5317         if (ret) {
5318                 kfree(rule);
5319                 return ret;
5320         }
5321
5322         rule->flow_type = fs->flow_type;
5323
5324         rule->location = fs->location;
5325         rule->unused_tuple = unused;
5326         rule->vf_id = dst_vport_id;
5327         rule->queue_id = q_index;
5328         rule->action = action;
5329         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5330
5331         /* to avoid rule conflicts, we need to clear all arfs rules when the
5332          * user configures a rule via ethtool
5333          */
5334         hclge_clear_arfs_rules(handle);
5335
5336         spin_lock_bh(&hdev->fd_rule_lock);
5337         ret = hclge_fd_config_rule(hdev, rule);
5338
5339         spin_unlock_bh(&hdev->fd_rule_lock);
5340
5341         return ret;
5342 }
5343
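/* ethtool ntuple delete entry point: invalidate the TCAM entry at the
 * requested location and remove the rule from the driver's list.
 */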
5344 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5345                               struct ethtool_rxnfc *cmd)
5346 {
5347         struct hclge_vport *vport = hclge_get_vport(handle);
5348         struct hclge_dev *hdev = vport->back;
5349         struct ethtool_rx_flow_spec *fs;
5350         int ret;
5351
5352         if (!hnae3_dev_fd_supported(hdev))
5353                 return -EOPNOTSUPP;
5354
5355         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5356
5357         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5358                 return -EINVAL;
5359
5360         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5361                 dev_err(&hdev->pdev->dev,
5362                         "Delete fail, rule %d does not exist\n", fs->location);
5363                 return -ENOENT;
5364         }
5365
5366         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5367                                    NULL, false);
5368         if (ret)
5369                 return ret;
5370
5371         spin_lock_bh(&hdev->fd_rule_lock);
5372         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5373
5374         spin_unlock_bh(&hdev->fd_rule_lock);
5375
5376         return ret;
5377 }
5378
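/* Invalidate every TCAM entry tracked in fd_bmap; when @clear_list is
 * true, also flush the software rule list and its bookkeeping.
 */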
5379 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5380                                      bool clear_list)
5381 {
5382         struct hclge_vport *vport = hclge_get_vport(handle);
5383         struct hclge_dev *hdev = vport->back;
5384         struct hclge_fd_rule *rule;
5385         struct hlist_node *node;
5386         u16 location;
5387
5388         if (!hnae3_dev_fd_supported(hdev))
5389                 return;
5390
5391         spin_lock_bh(&hdev->fd_rule_lock);
5392         for_each_set_bit(location, hdev->fd_bmap,
5393                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5394                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5395                                      NULL, false);
5396
5397         if (clear_list) {
5398                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5399                                           rule_node) {
5400                         hlist_del(&rule->rule_node);
5401                         kfree(rule);
5402                 }
5403                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5404                 hdev->hclge_fd_rule_num = 0;
5405                 bitmap_zero(hdev->fd_bmap,
5406                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5407         }
5408
5409         spin_unlock_bh(&hdev->fd_rule_lock);
5410 }
5411
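/* Reprogram all software-tracked FD rules into hardware after a reset;
 * rules that fail to restore are dropped from the list.
 */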
5412 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5413 {
5414         struct hclge_vport *vport = hclge_get_vport(handle);
5415         struct hclge_dev *hdev = vport->back;
5416         struct hclge_fd_rule *rule;
5417         struct hlist_node *node;
5418         int ret;
5419
5420         /* Return ok here, because reset error handling will check this
5421          * return value. If error is returned here, the reset process will
5422          * fail.
5423          */
5424         if (!hnae3_dev_fd_supported(hdev))
5425                 return 0;
5426
5427         /* if fd is disabled, the rules should not be restored during reset */
5428         if (!hdev->fd_en)
5429                 return 0;
5430
5431         spin_lock_bh(&hdev->fd_rule_lock);
5432         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5433                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5434                 if (!ret)
5435                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5436
5437                 if (ret) {
5438                         dev_warn(&hdev->pdev->dev,
5439                                  "Restore rule %d failed, remove it\n",
5440                                  rule->location);
5441                         clear_bit(rule->location, hdev->fd_bmap);
5442                         hlist_del(&rule->rule_node);
5443                         kfree(rule);
5444                         hdev->hclge_fd_rule_num--;
5445                 }
5446         }
5447
5448         if (hdev->hclge_fd_rule_num)
5449                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5450
5451         spin_unlock_bh(&hdev->fd_rule_lock);
5452
5453         return 0;
5454 }
5455
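/* Report the current rule count and the stage 1 rule capacity, used for
 * ethtool's ETHTOOL_GRXCLSRLCNT query.
 */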
5456 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5457                                  struct ethtool_rxnfc *cmd)
5458 {
5459         struct hclge_vport *vport = hclge_get_vport(handle);
5460         struct hclge_dev *hdev = vport->back;
5461
5462         if (!hnae3_dev_fd_supported(hdev))
5463                 return -EOPNOTSUPP;
5464
5465         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5466         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5467
5468         return 0;
5469 }
5470
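/* Look up the rule at fs->location and translate it back into an
 * ethtool flow spec (ETHTOOL_GRXCLSRULE); tuple fields the rule leaves
 * unused are reported with zero masks.
 */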
5471 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5472                                   struct ethtool_rxnfc *cmd)
5473 {
5474         struct hclge_vport *vport = hclge_get_vport(handle);
5475         struct hclge_fd_rule *rule = NULL;
5476         struct hclge_dev *hdev = vport->back;
5477         struct ethtool_rx_flow_spec *fs;
5478         struct hlist_node *node2;
5479
5480         if (!hnae3_dev_fd_supported(hdev))
5481                 return -EOPNOTSUPP;
5482
5483         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5484
5485         spin_lock_bh(&hdev->fd_rule_lock);
5486
5487         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5488                 if (rule->location >= fs->location)
5489                         break;
5490         }
5491
5492         if (!rule || fs->location != rule->location) {
5493                 spin_unlock_bh(&hdev->fd_rule_lock);
5494
5495                 return -ENOENT;
5496         }
5497
5498         fs->flow_type = rule->flow_type;
5499         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5500         case SCTP_V4_FLOW:
5501         case TCP_V4_FLOW:
5502         case UDP_V4_FLOW:
5503                 fs->h_u.tcp_ip4_spec.ip4src =
5504                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5505                 fs->m_u.tcp_ip4_spec.ip4src =
5506                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5507                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5508
5509                 fs->h_u.tcp_ip4_spec.ip4dst =
5510                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5511                 fs->m_u.tcp_ip4_spec.ip4dst =
5512                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5513                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5514
5515                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5516                 fs->m_u.tcp_ip4_spec.psrc =
5517                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5518                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5519
5520                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5521                 fs->m_u.tcp_ip4_spec.pdst =
5522                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5523                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5524
5525                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5526                 fs->m_u.tcp_ip4_spec.tos =
5527                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5528                                 0 : rule->tuples_mask.ip_tos;
5529
5530                 break;
5531         case IP_USER_FLOW:
5532                 fs->h_u.usr_ip4_spec.ip4src =
5533                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5534                 fs->m_u.usr_ip4_spec.ip4src =
5535                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5536                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5537
5538                 fs->h_u.usr_ip4_spec.ip4dst =
5539                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5540                 fs->m_u.usr_ip4_spec.ip4dst =
5541                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5542                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5543
5544                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5545                 fs->m_u.usr_ip4_spec.tos =
5546                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5547                                 0 : rule->tuples_mask.ip_tos;
5548
5549                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5550                 fs->m_u.usr_ip4_spec.proto =
5551                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5552                                 0 : rule->tuples_mask.ip_proto;
5553
5554                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5555
5556                 break;
5557         case SCTP_V6_FLOW:
5558         case TCP_V6_FLOW:
5559         case UDP_V6_FLOW:
5560                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5561                                   rule->tuples.src_ip, IPV6_SIZE);
5562                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5563                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5564                                sizeof(int) * IPV6_SIZE);
5565                 else
5566                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5567                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5568
5569                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5570                                   rule->tuples.dst_ip, IPV6_SIZE);
5571                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5572                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5573                                sizeof(int) * IPV6_SIZE);
5574                 else
5575                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5576                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5577
5578                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5579                 fs->m_u.tcp_ip6_spec.psrc =
5580                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5581                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5582
5583                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5584                 fs->m_u.tcp_ip6_spec.pdst =
5585                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5586                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5587
5588                 break;
5589         case IPV6_USER_FLOW:
5590                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5591                                   rule->tuples.src_ip, IPV6_SIZE);
5592                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5593                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5594                                sizeof(int) * IPV6_SIZE);
5595                 else
5596                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5597                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5598
5599                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5600                                   rule->tuples.dst_ip, IPV6_SIZE);
5601                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5602                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5603                                sizeof(int) * IPV6_SIZE);
5604                 else
5605                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5606                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5607
5608                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5609                 fs->m_u.usr_ip6_spec.l4_proto =
5610                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5611                                 0 : rule->tuples_mask.ip_proto;
5612
5613                 break;
5614         case ETHER_FLOW:
5615                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5616                                 rule->tuples.src_mac);
5617                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5618                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5619                 else
5620                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5621                                         rule->tuples_mask.src_mac);
5622
5623                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5624                                 rule->tuples.dst_mac);
5625                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5626                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5627                 else
5628                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5629                                         rule->tuples_mask.dst_mac);
5630
5631                 fs->h_u.ether_spec.h_proto =
5632                                 cpu_to_be16(rule->tuples.ether_proto);
5633                 fs->m_u.ether_spec.h_proto =
5634                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5635                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5636
5637                 break;
5638         default:
5639                 spin_unlock_bh(&hdev->fd_rule_lock);
5640                 return -EOPNOTSUPP;
5641         }
5642
5643         if (fs->flow_type & FLOW_EXT) {
5644                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5645                 fs->m_ext.vlan_tci =
5646                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5647                                 cpu_to_be16(VLAN_VID_MASK) :
5648                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5649         }
5650
5651         if (fs->flow_type & FLOW_MAC_EXT) {
5652                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5653                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5654                         eth_zero_addr(fs->m_ext.h_dest);
5655                 else
5656                         ether_addr_copy(fs->m_ext.h_dest,
5657                                         rule->tuples_mask.dst_mac);
5658         }
5659
5660         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5661                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5662         } else {
5663                 u64 vf_id;
5664
5665                 fs->ring_cookie = rule->queue_id;
5666                 vf_id = rule->vf_id;
5667                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5668                 fs->ring_cookie |= vf_id;
5669         }
5670
5671         spin_unlock_bh(&hdev->fd_rule_lock);
5672
5673         return 0;
5674 }
5675
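
/* hclge_get_all_rules - fill @rule_locs with the location of every
 * configured flow director rule. Returns -EMSGSIZE when the caller's
 * buffer (cmd->rule_cnt entries) is too small to hold them all.
 */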
5676 static int hclge_get_all_rules(struct hnae3_handle *handle,
5677                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5678 {
5679         struct hclge_vport *vport = hclge_get_vport(handle);
5680         struct hclge_dev *hdev = vport->back;
5681         struct hclge_fd_rule *rule;
5682         struct hlist_node *node2;
5683         int cnt = 0;
5684
5685         if (!hnae3_dev_fd_supported(hdev))
5686                 return -EOPNOTSUPP;
5687
5688         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5689
5690         spin_lock_bh(&hdev->fd_rule_lock);
5691         hlist_for_each_entry_safe(rule, node2,
5692                                   &hdev->fd_rule_list, rule_node) {
5693                 if (cnt == cmd->rule_cnt) {
5694                         spin_unlock_bh(&hdev->fd_rule_lock);
5695                         return -EMSGSIZE;
5696                 }
5697
5698                 rule_locs[cnt] = rule->location;
5699                 cnt++;
5700         }
5701
5702         spin_unlock_bh(&hdev->fd_rule_lock);
5703
5704         cmd->rule_cnt = cnt;
5705
5706         return 0;
5707 }
5708
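
/* Extract the match tuples used by the flow director from a parsed
 * flow_keys. For IPv4 only the last word of the src/dst arrays is
 * filled (index 3, i.e. IPV4_INDEX); IPv6 fills all four words.
 */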
5709 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5710                                      struct hclge_fd_rule_tuples *tuples)
5711 {
5712         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5713         tuples->ip_proto = fkeys->basic.ip_proto;
5714         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5715
5716         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5717                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5718                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5719         } else {
5720                 memcpy(tuples->src_ip,
5721                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5722                        sizeof(tuples->src_ip));
5723                 memcpy(tuples->dst_ip,
5724                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5725                        sizeof(tuples->dst_ip));
5726         }
5727 }
5728
5729 /* traverse all rules, check whether an existing rule has the same tuples */
5730 static struct hclge_fd_rule *
5731 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5732                           const struct hclge_fd_rule_tuples *tuples)
5733 {
5734         struct hclge_fd_rule *rule = NULL;
5735         struct hlist_node *node;
5736
5737         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5738                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5739                         return rule;
5740         }
5741
5742         return NULL;
5743 }
5744
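
/* Build an aRFS rule from the given tuples: the MAC addresses, VLAN
 * tag, IP ToS and source port are marked unused, the flow type is
 * derived from the ether/ip protocol, and the mask is set to all ones
 * for the remaining tuples.
 */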
5745 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5746                                      struct hclge_fd_rule *rule)
5747 {
5748         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5749                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5750                              BIT(INNER_SRC_PORT);
5751         rule->action = 0;
5752         rule->vf_id = 0;
5753         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5754         if (tuples->ether_proto == ETH_P_IP) {
5755                 if (tuples->ip_proto == IPPROTO_TCP)
5756                         rule->flow_type = TCP_V4_FLOW;
5757                 else
5758                         rule->flow_type = UDP_V4_FLOW;
5759         } else {
5760                 if (tuples->ip_proto == IPPROTO_TCP)
5761                         rule->flow_type = TCP_V6_FLOW;
5762                 else
5763                         rule->flow_type = UDP_V6_FLOW;
5764         }
5765         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5766         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5767 }
5768
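
/* aRFS flow-steering entry: direct the flow described by @fkeys to
 * @queue_id. Returns the rule location on success (used later as the
 * filter id by hclge_rfs_filter_expire()), or a negative errno.
 */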
5769 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5770                                       u16 flow_id, struct flow_keys *fkeys)
5771 {
5772         struct hclge_vport *vport = hclge_get_vport(handle);
5773         struct hclge_fd_rule_tuples new_tuples;
5774         struct hclge_dev *hdev = vport->back;
5775         struct hclge_fd_rule *rule;
5776         u16 tmp_queue_id;
5777         u16 bit_id;
5778         int ret;
5779
5780         if (!hnae3_dev_fd_supported(hdev))
5781                 return -EOPNOTSUPP;
5782
5783         memset(&new_tuples, 0, sizeof(new_tuples));
5784         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5785
5786         spin_lock_bh(&hdev->fd_rule_lock);
5787
5788         /* when an fd rule added by the user already exists,
5789          * arfs must not take effect
5790          */
5791         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5792                 spin_unlock_bh(&hdev->fd_rule_lock);
5793
5794                 return -EOPNOTSUPP;
5795         }
5796
5797         /* check whether a flow director filter already exists for this
5798          * flow: if not, create a new filter for it;
5799          * if a filter exists with a different queue id, modify the filter;
5800          * if a filter exists with the same queue id, do nothing
5801          */
5802         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5803         if (!rule) {
5804                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5805                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5806                         spin_unlock_bh(&hdev->fd_rule_lock);
5807
5808                         return -ENOSPC;
5809                 }
5810
5811                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5812                 if (!rule) {
5813                         spin_unlock_bh(&hdev->fd_rule_lock);
5814
5815                         return -ENOMEM;
5816                 }
5817
5818                 set_bit(bit_id, hdev->fd_bmap);
5819                 rule->location = bit_id;
5820                 rule->flow_id = flow_id;
5821                 rule->queue_id = queue_id;
5822                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5823                 ret = hclge_fd_config_rule(hdev, rule);
5824
5825                 spin_unlock_bh(&hdev->fd_rule_lock);
5826
5827                 if (ret)
5828                         return ret;
5829
5830                 return rule->location;
5831         }
5832
5833         spin_unlock_bh(&hdev->fd_rule_lock);
5834
5835         if (rule->queue_id == queue_id)
5836                 return rule->location;
5837
5838         tmp_queue_id = rule->queue_id;
5839         rule->queue_id = queue_id;
5840         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5841         if (ret) {
5842                 rule->queue_id = tmp_queue_id;
5843                 return ret;
5844         }
5845
5846         return rule->location;
5847 }
5848
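
/* Walk the aRFS rule list and let rps_may_expire_flow() decide which
 * rules are stale; expired rules are moved to a local list under the
 * lock, and their TCAM entries are deleted afterwards outside the lock.
 */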
5849 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5850 {
5851 #ifdef CONFIG_RFS_ACCEL
5852         struct hnae3_handle *handle = &hdev->vport[0].nic;
5853         struct hclge_fd_rule *rule;
5854         struct hlist_node *node;
5855         HLIST_HEAD(del_list);
5856
5857         spin_lock_bh(&hdev->fd_rule_lock);
5858         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5859                 spin_unlock_bh(&hdev->fd_rule_lock);
5860                 return;
5861         }
5862         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5863                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5864                                         rule->flow_id, rule->location)) {
5865                         hlist_del_init(&rule->rule_node);
5866                         hlist_add_head(&rule->rule_node, &del_list);
5867                         hdev->hclge_fd_rule_num--;
5868                         clear_bit(rule->location, hdev->fd_bmap);
5869                 }
5870         }
5871         spin_unlock_bh(&hdev->fd_rule_lock);
5872
5873         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5874                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5875                                      rule->location, NULL, false);
5876                 kfree(rule);
5877         }
5878 #endif
5879 }
5880
5881 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5882 {
5883 #ifdef CONFIG_RFS_ACCEL
5884         struct hclge_vport *vport = hclge_get_vport(handle);
5885         struct hclge_dev *hdev = vport->back;
5886
5887         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5888                 hclge_del_all_fd_entries(handle, true);
5889 #endif
5890 }
5891
5892 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5893 {
5894         struct hclge_vport *vport = hclge_get_vport(handle);
5895         struct hclge_dev *hdev = vport->back;
5896
5897         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5898                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5899 }
5900
5901 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5902 {
5903         struct hclge_vport *vport = hclge_get_vport(handle);
5904         struct hclge_dev *hdev = vport->back;
5905
5906         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5907 }
5908
5909 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5910 {
5911         struct hclge_vport *vport = hclge_get_vport(handle);
5912         struct hclge_dev *hdev = vport->back;
5913
5914         return hdev->rst_stats.hw_reset_done_cnt;
5915 }
5916
5917 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5918 {
5919         struct hclge_vport *vport = hclge_get_vport(handle);
5920         struct hclge_dev *hdev = vport->back;
5921         bool clear;
5922
5923         hdev->fd_en = enable;
5924         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5925         if (!enable)
5926                 hclge_del_all_fd_entries(handle, clear);
5927         else
5928                 hclge_restore_fd_entries(handle);
5929 }
5930
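
/* Enable or disable the MAC. When enabling, TX/RX, padding, FCS
 * insertion/stripping and oversize truncation are all switched on in
 * a single HCLGE_OPC_CONFIG_MAC_MODE command; disabling clears them.
 */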
5931 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5932 {
5933         struct hclge_desc desc;
5934         struct hclge_config_mac_mode_cmd *req =
5935                 (struct hclge_config_mac_mode_cmd *)desc.data;
5936         u32 loop_en = 0;
5937         int ret;
5938
5939         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5940
5941         if (enable) {
5942                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
5943                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
5944                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
5945                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
5946                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
5947                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
5948                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
5949                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
5950                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
5951                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
5952         }
5953
5954         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5955
5956         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5957         if (ret)
5958                 dev_err(&hdev->pdev->dev,
5959                         "mac enable fail, ret =%d.\n", ret);
5960 }
5961
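
/* Toggle application (MAC-level) loopback with a read-modify-write of
 * the MAC mode config, so the remaining mode bits keep their original
 * configuration.
 */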
5962 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5963 {
5964         struct hclge_config_mac_mode_cmd *req;
5965         struct hclge_desc desc;
5966         u32 loop_en;
5967         int ret;
5968
5969         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5970         /* 1 Read out the current MAC mode config first */
5971         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5972         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5973         if (ret) {
5974                 dev_err(&hdev->pdev->dev,
5975                         "mac loopback get fail, ret =%d.\n", ret);
5976                 return ret;
5977         }
5978
5979         /* 2 Then setup the loopback flag */
5980         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5981         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5982         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5983         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5984
5985         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5986
5987         /* 3 Config mac work mode with loopback flag
5988          * and its original configuration parameters
5989          */
5990         hclge_cmd_reuse_desc(&desc, false);
5991         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5992         if (ret)
5993                 dev_err(&hdev->pdev->dev,
5994                         "mac loopback set fail, ret =%d.\n", ret);
5995         return ret;
5996 }
5997
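
/* Configure serial or parallel serdes loopback, then poll the firmware
 * for command completion and the MAC link status for the expected state
 * (up when enabling, down when disabling), bounded by the retry counts
 * defined below.
 */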
5998 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5999                                      enum hnae3_loop loop_mode)
6000 {
6001 #define HCLGE_SERDES_RETRY_MS   10
6002 #define HCLGE_SERDES_RETRY_NUM  100
6003
6004 #define HCLGE_MAC_LINK_STATUS_MS   10
6005 #define HCLGE_MAC_LINK_STATUS_NUM  100
6006 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6007 #define HCLGE_MAC_LINK_STATUS_UP   1
6008
6009         struct hclge_serdes_lb_cmd *req;
6010         struct hclge_desc desc;
6011         int mac_link_ret = 0;
6012         int ret, i = 0;
6013         u8 loop_mode_b;
6014
6015         req = (struct hclge_serdes_lb_cmd *)desc.data;
6016         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6017
6018         switch (loop_mode) {
6019         case HNAE3_LOOP_SERIAL_SERDES:
6020                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6021                 break;
6022         case HNAE3_LOOP_PARALLEL_SERDES:
6023                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6024                 break;
6025         default:
6026                 dev_err(&hdev->pdev->dev,
6027                         "unsupported serdes loopback mode %d\n", loop_mode);
6028                 return -ENOTSUPP;
6029         }
6030
6031         if (en) {
6032                 req->enable = loop_mode_b;
6033                 req->mask = loop_mode_b;
6034                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6035         } else {
6036                 req->mask = loop_mode_b;
6037                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6038         }
6039
6040         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6041         if (ret) {
6042                 dev_err(&hdev->pdev->dev,
6043                         "serdes loopback set fail, ret = %d\n", ret);
6044                 return ret;
6045         }
6046
6047         do {
6048                 msleep(HCLGE_SERDES_RETRY_MS);
6049                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6050                                            true);
6051                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6052                 if (ret) {
6053                         dev_err(&hdev->pdev->dev,
6054                                 "serdes loopback get fail, ret = %d\n", ret);
6055                         return ret;
6056                 }
6057         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6058                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6059
6060         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6061                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6062                 return -EBUSY;
6063         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6064                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6065                 return -EIO;
6066         }
6067
6068         hclge_cfg_mac_mode(hdev, en);
6069
6070         i = 0;
6071         do {
6072                 /* serdes internal loopback is independent of the network cable */
6073                 msleep(HCLGE_MAC_LINK_STATUS_MS);
6074                 ret = hclge_get_mac_link_status(hdev);
6075                 if (ret == mac_link_ret)
6076                         return 0;
6077         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6078
6079         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6080
6081         return -EBUSY;
6082 }
6083
6084 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6085                             int stream_id, bool enable)
6086 {
6087         struct hclge_desc desc;
6088         struct hclge_cfg_com_tqp_queue_cmd *req =
6089                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6090         int ret;
6091
6092         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6093         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6094         req->stream_id = cpu_to_le16(stream_id);
6095         if (enable)
6096                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6097
6098         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6099         if (ret)
6100                 dev_err(&hdev->pdev->dev,
6101                         "Tqp enable fail, status =%d.\n", ret);
6102         return ret;
6103 }
6104
6105 static int hclge_set_loopback(struct hnae3_handle *handle,
6106                               enum hnae3_loop loop_mode, bool en)
6107 {
6108         struct hclge_vport *vport = hclge_get_vport(handle);
6109         struct hnae3_knic_private_info *kinfo;
6110         struct hclge_dev *hdev = vport->back;
6111         int i, ret;
6112
6113         switch (loop_mode) {
6114         case HNAE3_LOOP_APP:
6115                 ret = hclge_set_app_loopback(hdev, en);
6116                 break;
6117         case HNAE3_LOOP_SERIAL_SERDES:
6118         case HNAE3_LOOP_PARALLEL_SERDES:
6119                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6120                 break;
6121         default:
6122                 ret = -ENOTSUPP;
6123                 dev_err(&hdev->pdev->dev,
6124                         "loop_mode %d is not supported\n", loop_mode);
6125                 break;
6126         }
6127
6128         if (ret)
6129                 return ret;
6130
6131         kinfo = &vport->nic.kinfo;
6132         for (i = 0; i < kinfo->num_tqps; i++) {
6133                 ret = hclge_tqp_enable(hdev, i, 0, en);
6134                 if (ret)
6135                         return ret;
6136         }
6137
6138         return 0;
6139 }
6140
6141 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6142 {
6143         struct hclge_vport *vport = hclge_get_vport(handle);
6144         struct hnae3_knic_private_info *kinfo;
6145         struct hnae3_queue *queue;
6146         struct hclge_tqp *tqp;
6147         int i;
6148
6149         kinfo = &vport->nic.kinfo;
6150         for (i = 0; i < kinfo->num_tqps; i++) {
6151                 queue = handle->kinfo.tqp[i];
6152                 tqp = container_of(queue, struct hclge_tqp, q);
6153                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6154         }
6155 }
6156
6157 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6158 {
6159         struct hclge_vport *vport = hclge_get_vport(handle);
6160         struct hclge_dev *hdev = vport->back;
6161
6162         if (enable) {
6163                 mod_timer(&hdev->service_timer, jiffies + HZ);
6164         } else {
6165                 del_timer_sync(&hdev->service_timer);
6166                 cancel_work_sync(&hdev->service_task);
6167                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6168         }
6169 }
6170
6171 static int hclge_ae_start(struct hnae3_handle *handle)
6172 {
6173         struct hclge_vport *vport = hclge_get_vport(handle);
6174         struct hclge_dev *hdev = vport->back;
6175
6176         /* mac enable */
6177         hclge_cfg_mac_mode(hdev, true);
6178         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6179         hdev->hw.mac.link = 0;
6180
6181         /* reset tqp stats */
6182         hclge_reset_tqp_stats(handle);
6183
6184         hclge_mac_start_phy(hdev);
6185
6186         return 0;
6187 }
6188
6189 static void hclge_ae_stop(struct hnae3_handle *handle)
6190 {
6191         struct hclge_vport *vport = hclge_get_vport(handle);
6192         struct hclge_dev *hdev = vport->back;
6193         int i;
6194
6195         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6196
6197         hclge_clear_arfs_rules(handle);
6198
6199         /* If it is not a PF reset, the firmware will disable the MAC,
6200          * so we only need to stop the phy here.
6201          */
6202         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6203             hdev->reset_type != HNAE3_FUNC_RESET) {
6204                 hclge_mac_stop_phy(hdev);
6205                 return;
6206         }
6207
6208         for (i = 0; i < handle->kinfo.num_tqps; i++)
6209                 hclge_reset_tqp(handle, i);
6210
6211         /* Mac disable */
6212         hclge_cfg_mac_mode(hdev, false);
6213
6214         hclge_mac_stop_phy(hdev);
6215
6216         /* reset tqp stats */
6217         hclge_reset_tqp_stats(handle);
6218         hclge_update_link_status(hdev);
6219 }
6220
6221 int hclge_vport_start(struct hclge_vport *vport)
6222 {
6223         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6224         vport->last_active_jiffies = jiffies;
6225         return 0;
6226 }
6227
6228 void hclge_vport_stop(struct hclge_vport *vport)
6229 {
6230         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6231 }
6232
6233 static int hclge_client_start(struct hnae3_handle *handle)
6234 {
6235         struct hclge_vport *vport = hclge_get_vport(handle);
6236
6237         return hclge_vport_start(vport);
6238 }
6239
6240 static void hclge_client_stop(struct hnae3_handle *handle)
6241 {
6242         struct hclge_vport *vport = hclge_get_vport(handle);
6243
6244         hclge_vport_stop(vport);
6245 }
6246
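
/* Translate the firmware response of a MAC/VLAN table command into an
 * errno. resp_code is interpreted per opcode: for ADD, 0 and 1 both
 * mean success while the overflow codes map to -ENOSPC; for REMOVE and
 * LKUP, 1 means the entry was not found (-ENOENT).
 */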
6247 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6248                                          u16 cmdq_resp, u8  resp_code,
6249                                          enum hclge_mac_vlan_tbl_opcode op)
6250 {
6251         struct hclge_dev *hdev = vport->back;
6252         int return_status = -EIO;
6253
6254         if (cmdq_resp) {
6255                 dev_err(&hdev->pdev->dev,
6256                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6257                         cmdq_resp);
6258                 return -EIO;
6259         }
6260
6261         if (op == HCLGE_MAC_VLAN_ADD) {
6262                 if ((!resp_code) || (resp_code == 1)) {
6263                         return_status = 0;
6264                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6265                         return_status = -ENOSPC;
6266                         dev_err(&hdev->pdev->dev,
6267                                 "add mac addr failed for uc_overflow.\n");
6268                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6269                         return_status = -ENOSPC;
6270                         dev_err(&hdev->pdev->dev,
6271                                 "add mac addr failed for mc_overflow.\n");
6272                 } else {
6273                         dev_err(&hdev->pdev->dev,
6274                                 "add mac addr failed for undefined, code=%d.\n",
6275                                 resp_code);
6276                 }
6277         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6278                 if (!resp_code) {
6279                         return_status = 0;
6280                 } else if (resp_code == 1) {
6281                         return_status = -ENOENT;
6282                         dev_dbg(&hdev->pdev->dev,
6283                                 "remove mac addr failed for miss.\n");
6284                 } else {
6285                         dev_err(&hdev->pdev->dev,
6286                                 "remove mac addr failed for undefined, code=%d.\n",
6287                                 resp_code);
6288                 }
6289         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6290                 if (!resp_code) {
6291                         return_status = 0;
6292                 } else if (resp_code == 1) {
6293                         return_status = -ENOENT;
6294                         dev_dbg(&hdev->pdev->dev,
6295                                 "lookup mac addr failed for miss.\n");
6296                 } else {
6297                         dev_err(&hdev->pdev->dev,
6298                                 "lookup mac addr failed for undefined, code=%d.\n",
6299                                 resp_code);
6300                 }
6301         } else {
6302                 return_status = -EINVAL;
6303                 dev_err(&hdev->pdev->dev,
6304                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6305                         op);
6306         }
6307
6308         return return_status;
6309 }
6310
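
/* Set or clear the bit for @vfid in the function-id bitmap that spans
 * the command descriptors: the first 192 function ids live in
 * desc[1].data and the remainder in desc[2].data, 32 ids per word.
 */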
6311 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6312 {
6313 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6314
6315         unsigned int word_num;
6316         unsigned int bit_num;
6317
6318         if (vfid > 255 || vfid < 0)
6319                 return -EIO;
6320
6321         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6322                 word_num = vfid / 32;
6323                 bit_num  = vfid % 32;
6324                 if (clr)
6325                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6326                 else
6327                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6328         } else {
6329                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6330                 bit_num  = vfid % 32;
6331                 if (clr)
6332                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6333                 else
6334                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6335         }
6336
6337         return 0;
6338 }
6339
6340 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6341 {
6342 #define HCLGE_DESC_NUMBER 3
6343 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6344         int i, j;
6345
6346         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6347                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6348                         if (desc[i].data[j])
6349                                 return false;
6350
6351         return true;
6352 }
6353
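
/* Pack a MAC address into a mac_vlan table entry: bytes 0-3 go into
 * the 32-bit high field and bytes 4-5 into the 16-bit low field, both
 * little-endian; multicast entries also set the entry-type and
 * mc_mac_en bits.
 */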
6354 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6355                                    const u8 *addr, bool is_mc)
6356 {
6357         const unsigned char *mac_addr = addr;
6358         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
6359                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
6360         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6361
6362         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6363         if (is_mc) {
6364                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6365                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6366         }
6367
6368         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6369         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6370 }
6371
6372 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6373                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6374 {
6375         struct hclge_dev *hdev = vport->back;
6376         struct hclge_desc desc;
6377         u8 resp_code;
6378         u16 retval;
6379         int ret;
6380
6381         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6382
6383         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6384
6385         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6386         if (ret) {
6387                 dev_err(&hdev->pdev->dev,
6388                         "del mac addr failed for cmd_send, ret =%d.\n",
6389                         ret);
6390                 return ret;
6391         }
6392         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6393         retval = le16_to_cpu(desc.retval);
6394
6395         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6396                                              HCLGE_MAC_VLAN_REMOVE);
6397 }
6398
6399 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6400                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6401                                      struct hclge_desc *desc,
6402                                      bool is_mc)
6403 {
6404         struct hclge_dev *hdev = vport->back;
6405         u8 resp_code;
6406         u16 retval;
6407         int ret;
6408
6409         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6410         if (is_mc) {
6411                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6412                 memcpy(desc[0].data,
6413                        req,
6414                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6415                 hclge_cmd_setup_basic_desc(&desc[1],
6416                                            HCLGE_OPC_MAC_VLAN_ADD,
6417                                            true);
6418                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6419                 hclge_cmd_setup_basic_desc(&desc[2],
6420                                            HCLGE_OPC_MAC_VLAN_ADD,
6421                                            true);
6422                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6423         } else {
6424                 memcpy(desc[0].data,
6425                        req,
6426                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6427                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6428         }
6429         if (ret) {
6430                 dev_err(&hdev->pdev->dev,
6431                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6432                         ret);
6433                 return ret;
6434         }
6435         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6436         retval = le16_to_cpu(desc[0].retval);
6437
6438         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6439                                              HCLGE_MAC_VLAN_LKUP);
6440 }
6441
6442 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6443                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6444                                   struct hclge_desc *mc_desc)
6445 {
6446         struct hclge_dev *hdev = vport->back;
6447         int cfg_status;
6448         u8 resp_code;
6449         u16 retval;
6450         int ret;
6451
6452         if (!mc_desc) {
6453                 struct hclge_desc desc;
6454
6455                 hclge_cmd_setup_basic_desc(&desc,
6456                                            HCLGE_OPC_MAC_VLAN_ADD,
6457                                            false);
6458                 memcpy(desc.data, req,
6459                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6460                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6461                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6462                 retval = le16_to_cpu(desc.retval);
6463
6464                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6465                                                            resp_code,
6466                                                            HCLGE_MAC_VLAN_ADD);
6467         } else {
6468                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6469                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6470                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6471                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6472                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6473                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6474                 memcpy(mc_desc[0].data, req,
6475                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6476                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6477                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6478                 retval = le16_to_cpu(mc_desc[0].retval);
6479
6480                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6481                                                            resp_code,
6482                                                            HCLGE_MAC_VLAN_ADD);
6483         }
6484
6485         if (ret) {
6486                 dev_err(&hdev->pdev->dev,
6487                         "add mac addr failed for cmd_send, ret =%d.\n",
6488                         ret);
6489                 return ret;
6490         }
6491
6492         return cfg_status;
6493 }
6494
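
/* Request wanted_umv_size unicast MAC/VLAN (UMV) table entries from
 * the firmware, then split what was actually granted into a private
 * quota per function (PF plus num_req_vfs VFs); the shared pool gets
 * one extra quota plus the remainder of the division.
 */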
6495 static int hclge_init_umv_space(struct hclge_dev *hdev)
6496 {
6497         u16 allocated_size = 0;
6498         int ret;
6499
6500         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6501                                   true);
6502         if (ret)
6503                 return ret;
6504
6505         if (allocated_size < hdev->wanted_umv_size)
6506                 dev_warn(&hdev->pdev->dev,
6507                          "Alloc umv space failed, want %d, get %d\n",
6508                          hdev->wanted_umv_size, allocated_size);
6509
6510         mutex_init(&hdev->umv_mutex);
6511         hdev->max_umv_size = allocated_size;
6512         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6513          * preserve some unicast mac vlan table entries shared by pf
6514          * and its vfs.
6515          */
6516         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6517         hdev->share_umv_size = hdev->priv_umv_size +
6518                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6519
6520         return 0;
6521 }
6522
6523 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6524 {
6525         int ret;
6526
6527         if (hdev->max_umv_size > 0) {
6528                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6529                                           false);
6530                 if (ret)
6531                         return ret;
6532                 hdev->max_umv_size = 0;
6533         }
6534         mutex_destroy(&hdev->umv_mutex);
6535
6536         return 0;
6537 }
6538
6539 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6540                                u16 *allocated_size, bool is_alloc)
6541 {
6542         struct hclge_umv_spc_alc_cmd *req;
6543         struct hclge_desc desc;
6544         int ret;
6545
6546         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6547         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6548         if (!is_alloc)
6549                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6550
6551         req->space_size = cpu_to_le32(space_size);
6552
6553         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6554         if (ret) {
6555                 dev_err(&hdev->pdev->dev,
6556                         "%s umv space failed for cmd_send, ret =%d\n",
6557                         is_alloc ? "allocate" : "free", ret);
6558                 return ret;
6559         }
6560
6561         if (is_alloc && allocated_size)
6562                 *allocated_size = le32_to_cpu(desc.data[1]);
6563
6564         return 0;
6565 }
6566
6567 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6568 {
6569         struct hclge_vport *vport;
6570         int i;
6571
6572         for (i = 0; i < hdev->num_alloc_vport; i++) {
6573                 vport = &hdev->vport[i];
6574                 vport->used_umv_num = 0;
6575         }
6576
6577         mutex_lock(&hdev->umv_mutex);
6578         hdev->share_umv_size = hdev->priv_umv_size +
6579                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6580         mutex_unlock(&hdev->umv_mutex);
6581 }
6582
6583 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6584 {
6585         struct hclge_dev *hdev = vport->back;
6586         bool is_full;
6587
6588         mutex_lock(&hdev->umv_mutex);
6589         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6590                    hdev->share_umv_size == 0);
6591         mutex_unlock(&hdev->umv_mutex);
6592
6593         return is_full;
6594 }
6595
6596 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6597 {
6598         struct hclge_dev *hdev = vport->back;
6599
6600         mutex_lock(&hdev->umv_mutex);
6601         if (is_free) {
6602                 if (vport->used_umv_num > hdev->priv_umv_size)
6603                         hdev->share_umv_size++;
6604
6605                 if (vport->used_umv_num > 0)
6606                         vport->used_umv_num--;
6607         } else {
6608                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6609                     hdev->share_umv_size > 0)
6610                         hdev->share_umv_size--;
6611                 vport->used_umv_num++;
6612         }
6613         mutex_unlock(&hdev->umv_mutex);
6614 }
6615
6616 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6617                              const unsigned char *addr)
6618 {
6619         struct hclge_vport *vport = hclge_get_vport(handle);
6620
6621         return hclge_add_uc_addr_common(vport, addr);
6622 }
6623
6624 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6625                              const unsigned char *addr)
6626 {
6627         struct hclge_dev *hdev = vport->back;
6628         struct hclge_mac_vlan_tbl_entry_cmd req;
6629         struct hclge_desc desc;
6630         u16 egress_port = 0;
6631         int ret;
6632
6633         /* mac addr check */
6634         if (is_zero_ether_addr(addr) ||
6635             is_broadcast_ether_addr(addr) ||
6636             is_multicast_ether_addr(addr)) {
6637                 dev_err(&hdev->pdev->dev,
6638                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6639                          addr, is_zero_ether_addr(addr),
6640                          is_broadcast_ether_addr(addr),
6641                          is_multicast_ether_addr(addr));
6642                 return -EINVAL;
6643         }
6644
6645         memset(&req, 0, sizeof(req));
6646
6647         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6648                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6649
6650         req.egress_port = cpu_to_le16(egress_port);
6651
6652         hclge_prepare_mac_addr(&req, addr, false);
6653
6654         /* Look up the mac address in the mac_vlan table, and add
6655          * it if the entry does not exist. Duplicate unicast entries
6656          * are not allowed in the mac vlan table.
6657          */
6658         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6659         if (ret == -ENOENT) {
6660                 if (!hclge_is_umv_space_full(vport)) {
6661                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6662                         if (!ret)
6663                                 hclge_update_umv_space(vport, false);
6664                         return ret;
6665                 }
6666
6667                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6668                         hdev->priv_umv_size);
6669
6670                 return -ENOSPC;
6671         }
6672
6673         /* check if we just hit the duplicate */
6674         if (!ret) {
6675                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6676                          vport->vport_id, addr);
6677                 return 0;
6678         }
6679
6680         dev_err(&hdev->pdev->dev,
6681                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6682                 addr);
6683
6684         return ret;
6685 }
6686
6687 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6688                             const unsigned char *addr)
6689 {
6690         struct hclge_vport *vport = hclge_get_vport(handle);
6691
6692         return hclge_rm_uc_addr_common(vport, addr);
6693 }
6694
6695 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6696                             const unsigned char *addr)
6697 {
6698         struct hclge_dev *hdev = vport->back;
6699         struct hclge_mac_vlan_tbl_entry_cmd req;
6700         int ret;
6701
6702         /* mac addr check */
6703         if (is_zero_ether_addr(addr) ||
6704             is_broadcast_ether_addr(addr) ||
6705             is_multicast_ether_addr(addr)) {
6706                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6707                         addr);
6708                 return -EINVAL;
6709         }
6710
6711         memset(&req, 0, sizeof(req));
6712         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6713         hclge_prepare_mac_addr(&req, addr, false);
6714         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6715         if (!ret)
6716                 hclge_update_umv_space(vport, true);
6717
6718         return ret;
6719 }
6720
6721 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6722                              const unsigned char *addr)
6723 {
6724         struct hclge_vport *vport = hclge_get_vport(handle);
6725
6726         return hclge_add_mc_addr_common(vport, addr);
6727 }
6728
6729 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6730                              const unsigned char *addr)
6731 {
6732         struct hclge_dev *hdev = vport->back;
6733         struct hclge_mac_vlan_tbl_entry_cmd req;
6734         struct hclge_desc desc[3];
6735         int status;
6736
6737         /* mac addr check */
6738         if (!is_multicast_ether_addr(addr)) {
6739                 dev_err(&hdev->pdev->dev,
6740                         "Add mc mac err! invalid mac:%pM.\n",
6741                          addr);
6742                 return -EINVAL;
6743         }
6744         memset(&req, 0, sizeof(req));
6745         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6746         hclge_prepare_mac_addr(&req, addr, true);
6747         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6748         if (status) {
6749                 /* This mac addr does not exist, add a new entry for it */
6750                 memset(desc[0].data, 0, sizeof(desc[0].data));
6751                 memset(desc[1].data, 0, sizeof(desc[1].data));
6752                 memset(desc[2].data, 0, sizeof(desc[2].data));
6753         }
6754         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6755         if (status)
6756                 return status;
6757         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6758
6759         if (status == -ENOSPC)
6760                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6761
6762         return status;
6763 }
6764
6765 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6766                             const unsigned char *addr)
6767 {
6768         struct hclge_vport *vport = hclge_get_vport(handle);
6769
6770         return hclge_rm_mc_addr_common(vport, addr);
6771 }
6772
6773 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6774                             const unsigned char *addr)
6775 {
6776         struct hclge_dev *hdev = vport->back;
6777         struct hclge_mac_vlan_tbl_entry_cmd req;
6778         enum hclge_cmd_status status;
6779         struct hclge_desc desc[3];
6780         int status;
6781         /* mac addr check */
6782         if (!is_multicast_ether_addr(addr)) {
6783                 dev_dbg(&hdev->pdev->dev,
6784                         "Remove mc mac err! invalid mac:%pM.\n",
6785                          addr);
6786                 return -EINVAL;
6787         }
6788
6789         memset(&req, 0, sizeof(req));
6790         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6791         hclge_prepare_mac_addr(&req, addr, true);
6792         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6793         if (!status) {
6794                 /* This mac addr exists, remove this handle's VFID for it */
6795                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6796                 if (status)
6797                         return status;
6798
6799                 if (hclge_is_all_function_id_zero(desc))
6800                         /* All the vfids are zero, so delete this entry */
6801                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6802                 else
6803                         /* Not all the vfids are zero, update the vfid */
6804                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6805
6806         } else {
6807                 /* This mac address may be in the mta table, but it cannot
6808                  * be deleted here because an mta entry represents an
6809                  * address range rather than a specific address. The delete
6810                  * action on all entries will take effect in
6811                  * update_mta_status, called by hns3_nic_set_rx_mode.
6812                  */
6813                 status = 0;
6814         }
6815
6816         return status;
6817 }
6818
6819 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6820                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6821 {
6822         struct hclge_vport_mac_addr_cfg *mac_cfg;
6823         struct list_head *list;
6824
6825         if (!vport->vport_id)
6826                 return;
6827
6828         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6829         if (!mac_cfg)
6830                 return;
6831
6832         mac_cfg->hd_tbl_status = true;
6833         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6834
6835         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6836                &vport->uc_mac_list : &vport->mc_mac_list;
6837
6838         list_add_tail(&mac_cfg->node, list);
6839 }
6840
6841 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6842                               bool is_write_tbl,
6843                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6844 {
6845         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6846         struct list_head *list;
6847         bool uc_flag, mc_flag;
6848
6849         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6850                &vport->uc_mac_list : &vport->mc_mac_list;
6851
6852         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6853         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6854
6855         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6856                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6857                         if (uc_flag && mac_cfg->hd_tbl_status)
6858                                 hclge_rm_uc_addr_common(vport, mac_addr);
6859
6860                         if (mc_flag && mac_cfg->hd_tbl_status)
6861                                 hclge_rm_mc_addr_common(vport, mac_addr);
6862
6863                         list_del(&mac_cfg->node);
6864                         kfree(mac_cfg);
6865                         break;
6866                 }
6867         }
6868 }
6869
6870 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6871                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6872 {
6873         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6874         struct list_head *list;
6875
6876         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6877                &vport->uc_mac_list : &vport->mc_mac_list;
6878
6879         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6880                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6881                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6882
6883                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6884                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6885
6886                 mac_cfg->hd_tbl_status = false;
6887                 if (is_del_list) {
6888                         list_del(&mac_cfg->node);
6889                         kfree(mac_cfg);
6890                 }
6891         }
6892 }
6893
6894 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6895 {
6896         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6897         struct hclge_vport *vport;
6898         int i;
6899
6900         mutex_lock(&hdev->vport_cfg_mutex);
6901         for (i = 0; i < hdev->num_alloc_vport; i++) {
6902                 vport = &hdev->vport[i];
6903                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6904                         list_del(&mac->node);
6905                         kfree(mac);
6906                 }
6907
6908                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6909                         list_del(&mac->node);
6910                         kfree(mac);
6911                 }
6912         }
6913         mutex_unlock(&hdev->vport_cfg_mutex);
6914 }
6915
6916 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6917                                               u16 cmdq_resp, u8 resp_code)
6918 {
6919 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6920 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6921 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6922 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6923
6924         int return_status;
6925
6926         if (cmdq_resp) {
6927                 dev_err(&hdev->pdev->dev,
6928                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6929                         cmdq_resp);
6930                 return -EIO;
6931         }
6932
6933         switch (resp_code) {
6934         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6935         case HCLGE_ETHERTYPE_ALREADY_ADD:
6936                 return_status = 0;
6937                 break;
6938         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6939                 dev_err(&hdev->pdev->dev,
6940                         "add mac ethertype failed for manager table overflow.\n");
6941                 return_status = -EIO;
6942                 break;
6943         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6944                 dev_err(&hdev->pdev->dev,
6945                         "add mac ethertype failed for key conflict.\n");
6946                 return_status = -EIO;
6947                 break;
6948         default:
6949                 dev_err(&hdev->pdev->dev,
6950                         "add mac ethertype failed for undefined reason, code=%d.\n",
6951                         resp_code);
6952                 return_status = -EIO;
6953         }
6954
6955         return return_status;
6956 }
6957
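/* Write a single manager table entry via HCLGE_OPC_MAC_ETHTYPE_ADD and
 * decode the response byte with the helper above.
 */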
6958 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6959                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6960 {
6961         struct hclge_desc desc;
6962         u8 resp_code;
6963         u16 retval;
6964         int ret;
6965
6966         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6967         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6968
6969         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6970         if (ret) {
6971                 dev_err(&hdev->pdev->dev,
6972                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6973                         ret);
6974                 return ret;
6975         }
6976
6977         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6978         retval = le16_to_cpu(desc.retval);
6979
6980         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6981 }
6982
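/* Program every entry of the static hclge_mgr_table into the MAC manager
 * table at initialization time, failing fast on the first error.
 */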
6983 static int init_mgr_tbl(struct hclge_dev *hdev)
6984 {
6985         int ret;
6986         int i;
6987
6988         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6989                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6990                 if (ret) {
6991                         dev_err(&hdev->pdev->dev,
6992                                 "add mac ethertype failed, ret =%d.\n",
6993                                 ret);
6994                         return ret;
6995                 }
6996         }
6997
6998         return 0;
6999 }
7000
7001 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7002 {
7003         struct hclge_vport *vport = hclge_get_vport(handle);
7004         struct hclge_dev *hdev = vport->back;
7005
7006         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7007 }
7008
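/* Change the PF's unicast MAC address: validate the new address, remove
 * the old one from the unicast table when needed, add the new one (rolling
 * back to the old address if that fails), update the pause frame address
 * and only then commit the new address to hdev->hw.mac.mac_addr.
 */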
7009 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7010                               bool is_first)
7011 {
7012         const unsigned char *new_addr = (const unsigned char *)p;
7013         struct hclge_vport *vport = hclge_get_vport(handle);
7014         struct hclge_dev *hdev = vport->back;
7015         int ret;
7016
7017         /* mac addr check */
7018         if (is_zero_ether_addr(new_addr) ||
7019             is_broadcast_ether_addr(new_addr) ||
7020             is_multicast_ether_addr(new_addr)) {
7021                 dev_err(&hdev->pdev->dev,
7022                         "Change uc mac err! invalid mac:%pM.\n",
7023                          new_addr);
7024                 return -EINVAL;
7025         }
7026
7027         if ((!is_first || is_kdump_kernel()) &&
7028             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7029                 dev_warn(&hdev->pdev->dev,
7030                          "remove old uc mac address fail.\n");
7031
7032         ret = hclge_add_uc_addr(handle, new_addr);
7033         if (ret) {
7034                 dev_err(&hdev->pdev->dev,
7035                         "add uc mac address fail, ret =%d.\n",
7036                         ret);
7037
7038                 if (!is_first &&
7039                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7040                         dev_err(&hdev->pdev->dev,
7041                                 "restore uc mac address fail.\n");
7042
7043                 return -EIO;
7044         }
7045
7046         ret = hclge_pause_addr_cfg(hdev, new_addr);
7047         if (ret) {
7048                 dev_err(&hdev->pdev->dev,
7049                         "configure mac pause address fail, ret =%d.\n",
7050                         ret);
7051                 return -EIO;
7052         }
7053
7054         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7055
7056         return 0;
7057 }
7058
7059 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7060                           int cmd)
7061 {
7062         struct hclge_vport *vport = hclge_get_vport(handle);
7063         struct hclge_dev *hdev = vport->back;
7064
7065         if (!hdev->hw.mac.phydev)
7066                 return -EOPNOTSUPP;
7067
7068         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7069 }
7070
7071 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7072                                       u8 fe_type, bool filter_en, u8 vf_id)
7073 {
7074         struct hclge_vlan_filter_ctrl_cmd *req;
7075         struct hclge_desc desc;
7076         int ret;
7077
7078         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7079
7080         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7081         req->vlan_type = vlan_type;
7082         req->vlan_fe = filter_en ? fe_type : 0;
7083         req->vf_id = vf_id;
7084
7085         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7086         if (ret)
7087                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7088                         ret);
7089
7090         return ret;
7091 }
7092
7093 #define HCLGE_FILTER_TYPE_VF            0
7094 #define HCLGE_FILTER_TYPE_PORT          1
7095 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7096 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7097 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7098 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7099 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7100 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7101                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7102 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7103                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7104
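/* On revision 0x21 hardware the VF and port filters expose separate
 * NIC/RoCE ingress and egress enable bits, so egress (NIC and RoCE) is
 * toggled on the VF filter and ingress on the port filter; revision 0x20
 * only provides the single egress enable bit.
 */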
7105 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7106 {
7107         struct hclge_vport *vport = hclge_get_vport(handle);
7108         struct hclge_dev *hdev = vport->back;
7109
7110         if (hdev->pdev->revision >= 0x21) {
7111                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7112                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7113                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7114                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7115         } else {
7116                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7117                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7118                                            0);
7119         }
7120         if (enable)
7121                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7122         else
7123                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7124 }
7125
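/* Add or remove (is_kill) one vlan id in the firmware's VF vlan table.
 * The target VF is addressed by a single bit in a bitmap that spans two
 * command descriptors (HCLGE_MAX_VF_BYTES each); the response byte is
 * checked for "table full" and "entry not found" conditions below.
 */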
7126 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7127                                     bool is_kill, u16 vlan, u8 qos,
7128                                     __be16 proto)
7129 {
7130 #define HCLGE_MAX_VF_BYTES  16
7131         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7132         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7133         struct hclge_desc desc[2];
7134         u8 vf_byte_val;
7135         u8 vf_byte_off;
7136         int ret;
7137
7138         /* if the vf vlan table is full, firmware disables the vf vlan
7139          * filter, so it is impossible and unnecessary to add new vlan ids
7140          */
7141         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7142                 return 0;
7143
7144         hclge_cmd_setup_basic_desc(&desc[0],
7145                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7146         hclge_cmd_setup_basic_desc(&desc[1],
7147                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7148
7149         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7150
7151         vf_byte_off = vfid / 8;
7152         vf_byte_val = 1 << (vfid % 8);
7153
7154         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7155         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7156
7157         req0->vlan_id  = cpu_to_le16(vlan);
7158         req0->vlan_cfg = is_kill;
7159
7160         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7161                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7162         else
7163                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7164
7165         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7166         if (ret) {
7167                 dev_err(&hdev->pdev->dev,
7168                         "Send vf vlan command fail, ret =%d.\n",
7169                         ret);
7170                 return ret;
7171         }
7172
7173         if (!is_kill) {
7174 #define HCLGE_VF_VLAN_NO_ENTRY  2
7175                 if (!req0->resp_code || req0->resp_code == 1)
7176                         return 0;
7177
7178                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7179                         set_bit(vfid, hdev->vf_vlan_full);
7180                         dev_warn(&hdev->pdev->dev,
7181                                  "vf vlan table is full, vf vlan filter is disabled\n");
7182                         return 0;
7183                 }
7184
7185                 dev_err(&hdev->pdev->dev,
7186                         "Add vf vlan filter fail, ret =%d.\n",
7187                         req0->resp_code);
7188         } else {
7189 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7190                 if (!req0->resp_code)
7191                         return 0;
7192
7193                 /* the vf vlan filter is disabled once the vf vlan table is
7194                  * full, so new vlan ids were never added to the table.
7195                  * Just return 0 without a warning to avoid massive verbose
7196                  * logs at unload time.
7197                  */
7198                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7199                         return 0;
7200
7201                 dev_err(&hdev->pdev->dev,
7202                         "Kill vf vlan filter fail, ret =%d.\n",
7203                         req0->resp_code);
7204         }
7205
7206         return -EIO;
7207 }
7208
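/* The PF vlan filter table is programmed in windows of 160 vlan ids:
 * vlan_offset selects the window and a single bit inside the 20-byte
 * bitmap selects the id within that window.
 */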
7209 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7210                                       u16 vlan_id, bool is_kill)
7211 {
7212         struct hclge_vlan_filter_pf_cfg_cmd *req;
7213         struct hclge_desc desc;
7214         u8 vlan_offset_byte_val;
7215         u8 vlan_offset_byte;
7216         u8 vlan_offset_160;
7217         int ret;
7218
7219         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7220
7221         vlan_offset_160 = vlan_id / 160;
7222         vlan_offset_byte = (vlan_id % 160) / 8;
7223         vlan_offset_byte_val = 1 << (vlan_id % 8);
7224
7225         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7226         req->vlan_offset = vlan_offset_160;
7227         req->vlan_cfg = is_kill;
7228         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7229
7230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7231         if (ret)
7232                 dev_err(&hdev->pdev->dev,
7233                         "port vlan command, send fail, ret =%d.\n", ret);
7234         return ret;
7235 }
7236
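/* Program one vlan id for a vport. The VF vlan table is always updated;
 * the shared port table is only touched when the first vport joins the
 * vlan or the last one leaves it, as tracked by hdev->vlan_table.
 */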
7237 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7238                                     u16 vport_id, u16 vlan_id, u8 qos,
7239                                     bool is_kill)
7240 {
7241         u16 vport_idx, vport_num = 0;
7242         int ret;
7243
7244         if (is_kill && !vlan_id)
7245                 return 0;
7246
7247         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7248                                        0, proto);
7249         if (ret) {
7250                 dev_err(&hdev->pdev->dev,
7251                         "Set %d vport vlan filter config fail, ret =%d.\n",
7252                         vport_id, ret);
7253                 return ret;
7254         }
7255
7256         /* vlan 0 may be added twice when 8021q module is enabled */
7257         if (!is_kill && !vlan_id &&
7258             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7259                 return 0;
7260
7261         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7262                 dev_err(&hdev->pdev->dev,
7263                         "Add port vlan failed, vport %d is already in vlan %d\n",
7264                         vport_id, vlan_id);
7265                 return -EINVAL;
7266         }
7267
7268         if (is_kill &&
7269             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7270                 dev_err(&hdev->pdev->dev,
7271                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7272                         vport_id, vlan_id);
7273                 return -EINVAL;
7274         }
7275
7276         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7277                 vport_num++;
7278
7279         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7280                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7281                                                  is_kill);
7282
7283         return ret;
7284 }
7285
7286 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7287 {
7288         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7289         struct hclge_vport_vtag_tx_cfg_cmd *req;
7290         struct hclge_dev *hdev = vport->back;
7291         struct hclge_desc desc;
7292         int status;
7293
7294         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7295
7296         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7297         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7298         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7299         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7300                       vcfg->accept_tag1 ? 1 : 0);
7301         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7302                       vcfg->accept_untag1 ? 1 : 0);
7303         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7304                       vcfg->accept_tag2 ? 1 : 0);
7305         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7306                       vcfg->accept_untag2 ? 1 : 0);
7307         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7308                       vcfg->insert_tag1_en ? 1 : 0);
7309         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7310                       vcfg->insert_tag2_en ? 1 : 0);
7311         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7312
7313         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7314         req->vf_bitmap[req->vf_offset] =
7315                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7316
7317         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7318         if (status)
7319                 dev_err(&hdev->pdev->dev,
7320                         "Send port txvlan cfg command fail, ret =%d\n",
7321                         status);
7322
7323         return status;
7324 }
7325
7326 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7327 {
7328         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7329         struct hclge_vport_vtag_rx_cfg_cmd *req;
7330         struct hclge_dev *hdev = vport->back;
7331         struct hclge_desc desc;
7332         int status;
7333
7334         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7335
7336         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7337         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7338                       vcfg->strip_tag1_en ? 1 : 0);
7339         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7340                       vcfg->strip_tag2_en ? 1 : 0);
7341         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7342                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7343         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7344                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7345
7346         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7347         req->vf_bitmap[req->vf_offset] =
7348                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7349
7350         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7351         if (status)
7352                 dev_err(&hdev->pdev->dev,
7353                         "Send port rxvlan cfg command fail, ret =%d\n",
7354                         status);
7355
7356         return status;
7357 }
7358
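/* Derive the vport's TX and RX vlan tag handling from the port based vlan
 * state: when it is disabled, tag1 is supplied by the host (accepted and
 * never inserted); when it is enabled, hardware inserts vlan_tag as the
 * default tag1 and the RX strip settings are shifted accordingly.
 */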
7359 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7360                                   u16 port_base_vlan_state,
7361                                   u16 vlan_tag)
7362 {
7363         int ret;
7364
7365         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7366                 vport->txvlan_cfg.accept_tag1 = true;
7367                 vport->txvlan_cfg.insert_tag1_en = false;
7368                 vport->txvlan_cfg.default_tag1 = 0;
7369         } else {
7370                 vport->txvlan_cfg.accept_tag1 = false;
7371                 vport->txvlan_cfg.insert_tag1_en = true;
7372                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7373         }
7374
7375         vport->txvlan_cfg.accept_untag1 = true;
7376
7377         /* accept_tag2 and accept_untag2 are not supported on
7378          * pdev revision(0x20); newer revisions support them, but
7379          * these two fields cannot be configured by the user.
7380          */
7381         vport->txvlan_cfg.accept_tag2 = true;
7382         vport->txvlan_cfg.accept_untag2 = true;
7383         vport->txvlan_cfg.insert_tag2_en = false;
7384         vport->txvlan_cfg.default_tag2 = 0;
7385
7386         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7387                 vport->rxvlan_cfg.strip_tag1_en = false;
7388                 vport->rxvlan_cfg.strip_tag2_en =
7389                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7390         } else {
7391                 vport->rxvlan_cfg.strip_tag1_en =
7392                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7393                 vport->rxvlan_cfg.strip_tag2_en = true;
7394         }
7395         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7396         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7397
7398         ret = hclge_set_vlan_tx_offload_cfg(vport);
7399         if (ret)
7400                 return ret;
7401
7402         return hclge_set_vlan_rx_offload_cfg(vport);
7403 }
7404
7405 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7406 {
7407         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7408         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7409         struct hclge_desc desc;
7410         int status;
7411
7412         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7413         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7414         rx_req->ot_fst_vlan_type =
7415                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7416         rx_req->ot_sec_vlan_type =
7417                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7418         rx_req->in_fst_vlan_type =
7419                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7420         rx_req->in_sec_vlan_type =
7421                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7422
7423         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7424         if (status) {
7425                 dev_err(&hdev->pdev->dev,
7426                         "Send rxvlan protocol type command fail, ret =%d\n",
7427                         status);
7428                 return status;
7429         }
7430
7431         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7432
7433         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7434         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7435         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7436
7437         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7438         if (status)
7439                 dev_err(&hdev->pdev->dev,
7440                         "Send txvlan protocol type command fail, ret =%d\n",
7441                         status);
7442
7443         return status;
7444 }
7445
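/* Initialization-time vlan setup: enable the vlan filters that match the
 * hardware revision, set the default 0x8100 protocol types, apply each
 * vport's offload configuration and finally add vlan 0 to the filter.
 */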
7446 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7447 {
7448 #define HCLGE_DEF_VLAN_TYPE             0x8100
7449
7450         struct hnae3_handle *handle = &hdev->vport[0].nic;
7451         struct hclge_vport *vport;
7452         int ret;
7453         int i;
7454
7455         if (hdev->pdev->revision >= 0x21) {
7456                 /* for revision 0x21, vf vlan filter is per function */
7457                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7458                         vport = &hdev->vport[i];
7459                         ret = hclge_set_vlan_filter_ctrl(hdev,
7460                                                          HCLGE_FILTER_TYPE_VF,
7461                                                          HCLGE_FILTER_FE_EGRESS,
7462                                                          true,
7463                                                          vport->vport_id);
7464                         if (ret)
7465                                 return ret;
7466                 }
7467
7468                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7469                                                  HCLGE_FILTER_FE_INGRESS, true,
7470                                                  0);
7471                 if (ret)
7472                         return ret;
7473         } else {
7474                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7475                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7476                                                  true, 0);
7477                 if (ret)
7478                         return ret;
7479         }
7480
7481         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7482
7483         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7484         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7485         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7486         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7487         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7488         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7489
7490         ret = hclge_set_vlan_protocol_type(hdev);
7491         if (ret)
7492                 return ret;
7493
7494         for (i = 0; i < hdev->num_alloc_vport; i++) {
7495                 u16 vlan_tag;
7496
7497                 vport = &hdev->vport[i];
7498                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7499
7500                 ret = hclge_vlan_offload_cfg(vport,
7501                                              vport->port_base_vlan_cfg.state,
7502                                              vlan_tag);
7503                 if (ret)
7504                         return ret;
7505         }
7506
7507         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7508 }
7509
7510 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7511                                        bool written_to_tbl)
7512 {
7513         struct hclge_vport_vlan_cfg *vlan;
7514
7515         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7516         if (!vlan)
7517                 return;
7518
7519         vlan->hd_tbl_status = written_to_tbl;
7520         vlan->vlan_id = vlan_id;
7521
7522         list_add_tail(&vlan->node, &vport->vlan_list);
7523 }
7524
7525 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7526 {
7527         struct hclge_vport_vlan_cfg *vlan, *tmp;
7528         struct hclge_dev *hdev = vport->back;
7529         int ret;
7530
7531         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7532                 if (!vlan->hd_tbl_status) {
7533                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7534                                                        vport->vport_id,
7535                                                        vlan->vlan_id, 0, false);
7536                         if (ret) {
7537                                 dev_err(&hdev->pdev->dev,
7538                                         "restore vport vlan list failed, ret=%d\n",
7539                                         ret);
7540                                 return ret;
7541                         }
7542                 }
7543                 vlan->hd_tbl_status = true;
7544         }
7545
7546         return 0;
7547 }
7548
7549 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7550                                       bool is_write_tbl)
7551 {
7552         struct hclge_vport_vlan_cfg *vlan, *tmp;
7553         struct hclge_dev *hdev = vport->back;
7554
7555         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7556                 if (vlan->vlan_id == vlan_id) {
7557                         if (is_write_tbl && vlan->hd_tbl_status)
7558                                 hclge_set_vlan_filter_hw(hdev,
7559                                                          htons(ETH_P_8021Q),
7560                                                          vport->vport_id,
7561                                                          vlan_id, 0,
7562                                                          true);
7563
7564                         list_del(&vlan->node);
7565                         kfree(vlan);
7566                         break;
7567                 }
7568         }
7569 }
7570
7571 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7572 {
7573         struct hclge_vport_vlan_cfg *vlan, *tmp;
7574         struct hclge_dev *hdev = vport->back;
7575
7576         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7577                 if (vlan->hd_tbl_status)
7578                         hclge_set_vlan_filter_hw(hdev,
7579                                                  htons(ETH_P_8021Q),
7580                                                  vport->vport_id,
7581                                                  vlan->vlan_id, 0,
7582                                                  true);
7583
7584                 vlan->hd_tbl_status = false;
7585                 if (is_del_list) {
7586                         list_del(&vlan->node);
7587                         kfree(vlan);
7588                 }
7589         }
7590 }
7591
7592 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7593 {
7594         struct hclge_vport_vlan_cfg *vlan, *tmp;
7595         struct hclge_vport *vport;
7596         int i;
7597
7598         mutex_lock(&hdev->vport_cfg_mutex);
7599         for (i = 0; i < hdev->num_alloc_vport; i++) {
7600                 vport = &hdev->vport[i];
7601                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7602                         list_del(&vlan->node);
7603                         kfree(vlan);
7604                 }
7605         }
7606         mutex_unlock(&hdev->vport_cfg_mutex);
7607 }
7608
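/* Restore the hardware vlan table after a reset: vports with port based
 * vlan enabled get that single entry back, all others get every entry of
 * their software vlan list that was in hardware before the reset.
 */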
7609 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7610 {
7611         struct hclge_vport *vport = hclge_get_vport(handle);
7612         struct hclge_vport_vlan_cfg *vlan, *tmp;
7613         struct hclge_dev *hdev = vport->back;
7614         u16 vlan_proto, qos;
7615         u16 state, vlan_id;
7616         int i;
7617
7618         mutex_lock(&hdev->vport_cfg_mutex);
7619         for (i = 0; i < hdev->num_alloc_vport; i++) {
7620                 vport = &hdev->vport[i];
7621                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7622                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7623                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7624                 state = vport->port_base_vlan_cfg.state;
7625
7626                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7627                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7628                                                  vport->vport_id, vlan_id, qos,
7629                                                  false);
7630                         continue;
7631                 }
7632
7633                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7634                         if (vlan->hd_tbl_status)
7635                                 hclge_set_vlan_filter_hw(hdev,
7636                                                          htons(ETH_P_8021Q),
7637                                                          vport->vport_id,
7638                                                          vlan->vlan_id, 0,
7639                                                          false);
7640                 }
7641         }
7642
7643         mutex_unlock(&hdev->vport_cfg_mutex);
7644 }
7645
7646 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7647 {
7648         struct hclge_vport *vport = hclge_get_vport(handle);
7649
7650         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7651                 vport->rxvlan_cfg.strip_tag1_en = false;
7652                 vport->rxvlan_cfg.strip_tag2_en = enable;
7653         } else {
7654                 vport->rxvlan_cfg.strip_tag1_en = enable;
7655                 vport->rxvlan_cfg.strip_tag2_en = true;
7656         }
7657         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7658         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7659         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7660
7661         return hclge_set_vlan_rx_offload_cfg(vport);
7662 }
7663
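/* Switch between port based vlan and per-vlan filtering: enabling removes
 * the vport's individual hardware entries and installs the single port
 * based entry; disabling removes that entry and re-adds everything from
 * the vport's vlan list.
 */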
7664 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7665                                             u16 port_base_vlan_state,
7666                                             struct hclge_vlan_info *new_info,
7667                                             struct hclge_vlan_info *old_info)
7668 {
7669         struct hclge_dev *hdev = vport->back;
7670         int ret;
7671
7672         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7673                 hclge_rm_vport_all_vlan_table(vport, false);
7674                 return hclge_set_vlan_filter_hw(hdev,
7675                                                  htons(new_info->vlan_proto),
7676                                                  vport->vport_id,
7677                                                  new_info->vlan_tag,
7678                                                  new_info->qos, false);
7679         }
7680
7681         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7682                                        vport->vport_id, old_info->vlan_tag,
7683                                        old_info->qos, true);
7684         if (ret)
7685                 return ret;
7686
7687         return hclge_add_vport_all_vlan_table(vport);
7688 }
7689
7690 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7691                                     struct hclge_vlan_info *vlan_info)
7692 {
7693         struct hnae3_handle *nic = &vport->nic;
7694         struct hclge_vlan_info *old_vlan_info;
7695         struct hclge_dev *hdev = vport->back;
7696         int ret;
7697
7698         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7699
7700         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7701         if (ret)
7702                 return ret;
7703
7704         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7705                 /* add new VLAN tag */
7706                 ret = hclge_set_vlan_filter_hw(hdev,
7707                                                htons(vlan_info->vlan_proto),
7708                                                vport->vport_id,
7709                                                vlan_info->vlan_tag,
7710                                                vlan_info->qos, false);
7711                 if (ret)
7712                         return ret;
7713
7714                 /* remove old VLAN tag */
7715                 ret = hclge_set_vlan_filter_hw(hdev,
7716                                                htons(old_vlan_info->vlan_proto),
7717                                                vport->vport_id,
7718                                                old_vlan_info->vlan_tag,
7719                                                old_vlan_info->qos, true);
7720                 if (ret)
7721                         return ret;
7722
7723                 goto update;
7724         }
7725
7726         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7727                                                old_vlan_info);
7728         if (ret)
7729                 return ret;
7730
7731         /* update state only when disable/enable port based VLAN */
7732         vport->port_base_vlan_cfg.state = state;
7733         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7734                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7735         else
7736                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7737
7738 update:
7739         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7740         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7741         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7742
7743         return 0;
7744 }
7745
7746 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7747                                           enum hnae3_port_base_vlan_state state,
7748                                           u16 vlan)
7749 {
7750         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7751                 if (!vlan)
7752                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7753                 else
7754                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7755         } else {
7756                 if (!vlan)
7757                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7758                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7759                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7760                 else
7761                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7762         }
7763 }
7764
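/* ndo_set_vf_vlan handler: only supported from revision 0x21 on. The
 * request is validated, translated into a port based vlan state change,
 * and either applied directly (for the PF or a VF that is not alive) or
 * pushed to the VF through the mailbox.
 */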
7765 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7766                                     u16 vlan, u8 qos, __be16 proto)
7767 {
7768         struct hclge_vport *vport = hclge_get_vport(handle);
7769         struct hclge_dev *hdev = vport->back;
7770         struct hclge_vlan_info vlan_info;
7771         u16 state;
7772         int ret;
7773
7774         if (hdev->pdev->revision == 0x20)
7775                 return -EOPNOTSUPP;
7776
7777         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7778         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7779                 return -EINVAL;
7780         if (proto != htons(ETH_P_8021Q))
7781                 return -EPROTONOSUPPORT;
7782
7783         vport = &hdev->vport[vfid];
7784         state = hclge_get_port_base_vlan_state(vport,
7785                                                vport->port_base_vlan_cfg.state,
7786                                                vlan);
7787         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7788                 return 0;
7789
7790         vlan_info.vlan_tag = vlan;
7791         vlan_info.qos = qos;
7792         vlan_info.vlan_proto = ntohs(proto);
7793
7794         /* update port based VLAN for PF */
7795         if (!vfid) {
7796                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7797                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7798                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7799
7800                 return ret;
7801         }
7802
7803         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7804                 return hclge_update_port_base_vlan_cfg(vport, state,
7805                                                        &vlan_info);
7806         } else {
7807                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7808                                                         (u8)vfid, state,
7809                                                         vlan, qos,
7810                                                         ntohs(proto));
7811                 return ret;
7812         }
7813 }
7814
7815 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7816                           u16 vlan_id, bool is_kill)
7817 {
7818         struct hclge_vport *vport = hclge_get_vport(handle);
7819         struct hclge_dev *hdev = vport->back;
7820         bool written_to_tbl = false;
7821         int ret = 0;
7822
7823         /* When the device is resetting, firmware is unable to handle the
7824          * mailbox. Just record the vlan id, and remove it after the
7825          * reset finishes.
7826          */
7827         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7828                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7829                 return -EBUSY;
7830         }
7831
7832         /* When port based vlan is enabled, we use the port based vlan as
7833          * the vlan filter entry. In this case, we don't update the vlan
7834          * filter table when the user adds or removes a vlan; we just update
7835          * the vport vlan list. The vlan ids in the vlan list will be
7836          * written to the vlan filter table once port based vlan is disabled
7837          */
7838         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7839                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7840                                                vlan_id, 0, is_kill);
7841                 written_to_tbl = true;
7842         }
7843
7844         if (!ret) {
7845                 if (is_kill)
7846                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7847                 else
7848                         hclge_add_vport_vlan_table(vport, vlan_id,
7849                                                    written_to_tbl);
7850         } else if (is_kill) {
7851                 /* When removing a hw vlan filter fails, record the vlan id
7852                  * and try to remove it from hw later, to stay consistent
7853                  * with the stack
7854                  */
7855                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7856         }
7857         return ret;
7858 }
7859
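/* Periodically retry vlan deletions that failed earlier and were recorded
 * in each vport's vlan_del_fail_bmap, bounded by HCLGE_MAX_SYNC_COUNT
 * entries per invocation.
 */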
7860 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7861 {
7862 #define HCLGE_MAX_SYNC_COUNT    60
7863
7864         int i, ret, sync_cnt = 0;
7865         u16 vlan_id;
7866
7867         /* all vports are synced here, including vport 0 (the PF) */
7868         for (i = 0; i < hdev->num_alloc_vport; i++) {
7869                 struct hclge_vport *vport = &hdev->vport[i];
7870
7871                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7872                                          VLAN_N_VID);
7873                 while (vlan_id != VLAN_N_VID) {
7874                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7875                                                        vport->vport_id, vlan_id,
7876                                                        0, true);
7877                         if (ret && ret != -EINVAL)
7878                                 return;
7879
7880                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7881                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7882
7883                         sync_cnt++;
7884                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7885                                 return;
7886
7887                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7888                                                  VLAN_N_VID);
7889                 }
7890         }
7891 }
7892
7893 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7894 {
7895         struct hclge_config_max_frm_size_cmd *req;
7896         struct hclge_desc desc;
7897
7898         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7899
7900         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7901         req->max_frm_size = cpu_to_le16(new_mps);
7902         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7903
7904         return hclge_cmd_send(&hdev->hw, &desc, 1);
7905 }
7906
7907 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7908 {
7909         struct hclge_vport *vport = hclge_get_vport(handle);
7910
7911         return hclge_set_vport_mtu(vport, new_mtu);
7912 }
7913
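/* An MTU change is handled as a max frame size (MTU plus Ethernet header,
 * FCS and two vlan tags). A VF may only choose a frame size within the
 * PF's; changing the PF's frame size revalidates every VF, reprograms the
 * MAC and reallocates packet buffers while the client is stopped.
 */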
7914 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7915 {
7916         struct hclge_dev *hdev = vport->back;
7917         int i, max_frm_size, ret;
7918
7919         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7920         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7921             max_frm_size > HCLGE_MAC_MAX_FRAME)
7922                 return -EINVAL;
7923
7924         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7925         mutex_lock(&hdev->vport_lock);
7926         /* VF's mps must fit within hdev->mps */
7927         if (vport->vport_id && max_frm_size > hdev->mps) {
7928                 mutex_unlock(&hdev->vport_lock);
7929                 return -EINVAL;
7930         } else if (vport->vport_id) {
7931                 vport->mps = max_frm_size;
7932                 mutex_unlock(&hdev->vport_lock);
7933                 return 0;
7934         }
7935
7936         /* PF's mps must be no less than any VF's mps */
7937         for (i = 1; i < hdev->num_alloc_vport; i++)
7938                 if (max_frm_size < hdev->vport[i].mps) {
7939                         mutex_unlock(&hdev->vport_lock);
7940                         return -EINVAL;
7941                 }
7942
7943         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7944
7945         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7946         if (ret) {
7947                 dev_err(&hdev->pdev->dev,
7948                         "Change mtu fail, ret =%d\n", ret);
7949                 goto out;
7950         }
7951
7952         hdev->mps = max_frm_size;
7953         vport->mps = max_frm_size;
7954
7955         ret = hclge_buffer_alloc(hdev);
7956         if (ret)
7957                 dev_err(&hdev->pdev->dev,
7958                         "Allocate buffer fail, ret =%d\n", ret);
7959
7960 out:
7961         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7962         mutex_unlock(&hdev->vport_lock);
7963         return ret;
7964 }
7965
7966 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7967                                     bool enable)
7968 {
7969         struct hclge_reset_tqp_queue_cmd *req;
7970         struct hclge_desc desc;
7971         int ret;
7972
7973         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7974
7975         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7976         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7977         if (enable)
7978                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
7979
7980         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7981         if (ret) {
7982                 dev_err(&hdev->pdev->dev,
7983                         "Send tqp reset cmd error, status =%d\n", ret);
7984                 return ret;
7985         }
7986
7987         return 0;
7988 }
7989
7990 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7991 {
7992         struct hclge_reset_tqp_queue_cmd *req;
7993         struct hclge_desc desc;
7994         int ret;
7995
7996         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7997
7998         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7999         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8000
8001         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8002         if (ret) {
8003                 dev_err(&hdev->pdev->dev,
8004                         "Get reset status error, status =%d\n", ret);
8005                 return ret;
8006         }
8007
8008         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8009 }
8010
8011 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8012 {
8013         struct hnae3_queue *queue;
8014         struct hclge_tqp *tqp;
8015
8016         queue = handle->kinfo.tqp[queue_id];
8017         tqp = container_of(queue, struct hclge_tqp, q);
8018
8019         return tqp->index;
8020 }
8021
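/* Reset a single TQP from the PF side: disable the queue, assert the
 * reset request, poll the ready bit in 20 ms steps for up to
 * HCLGE_TQP_RESET_TRY_TIMES tries, then deassert the reset again.
 */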
8022 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8023 {
8024         struct hclge_vport *vport = hclge_get_vport(handle);
8025         struct hclge_dev *hdev = vport->back;
8026         int reset_try_times = 0;
8027         int reset_status;
8028         u16 queue_gid;
8029         int ret;
8030
8031         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8032
8033         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8034         if (ret) {
8035                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8036                 return ret;
8037         }
8038
8039         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8040         if (ret) {
8041                 dev_err(&hdev->pdev->dev,
8042                         "Send reset tqp cmd fail, ret = %d\n", ret);
8043                 return ret;
8044         }
8045
8046         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8047                 /* Wait for tqp hw reset */
8048                 msleep(20);
8049                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8050                 if (reset_status)
8051                         break;
8052         }
8053
8054         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8055                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8056                 return -ETIME;
8057         }
8058
8059         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8060         if (ret)
8061                 dev_err(&hdev->pdev->dev,
8062                         "Deassert the soft reset fail, ret = %d\n", ret);
8063
8064         return ret;
8065 }
8066
8067 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8068 {
8069         struct hclge_dev *hdev = vport->back;
8070         int reset_try_times = 0;
8071         int reset_status;
8072         u16 queue_gid;
8073         int ret;
8074
8075         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8076
8077         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8078         if (ret) {
8079                 dev_warn(&hdev->pdev->dev,
8080                          "Send reset tqp cmd fail, ret = %d\n", ret);
8081                 return;
8082         }
8083
8084         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8085                 /* Wait for tqp hw reset */
8086                 msleep(20);
8087                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8088                 if (reset_status)
8089                         break;
8090         }
8091
8092         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8093                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8094                 return;
8095         }
8096
8097         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8098         if (ret)
8099                 dev_warn(&hdev->pdev->dev,
8100                          "Deassert the soft reset fail, ret = %d\n", ret);
8101 }
8102
8103 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8104 {
8105         struct hclge_vport *vport = hclge_get_vport(handle);
8106         struct hclge_dev *hdev = vport->back;
8107
8108         return hdev->fw_version;
8109 }
8110
8111 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8112 {
8113         struct phy_device *phydev = hdev->hw.mac.phydev;
8114
8115         if (!phydev)
8116                 return;
8117
8118         phy_set_asym_pause(phydev, rx_en, tx_en);
8119 }
8120
8121 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8122 {
8123         int ret;
8124
8125         if (rx_en && tx_en)
8126                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8127         else if (rx_en && !tx_en)
8128                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8129         else if (!rx_en && tx_en)
8130                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8131         else
8132                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8133
8134         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8135                 return 0;
8136
8137         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8138         if (ret) {
8139                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8140                         ret);
8141                 return ret;
8142         }
8143
8144         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8145
8146         return 0;
8147 }
8148
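/* Re-resolve flow control after PHY autonegotiation: combine local and
 * link partner advertisements with mii_resolve_flowctrl_fdx() and apply
 * the result, forcing pause off on half duplex links.
 */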
8149 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8150 {
8151         struct phy_device *phydev = hdev->hw.mac.phydev;
8152         u16 remote_advertising = 0;
8153         u16 local_advertising;
8154         u32 rx_pause, tx_pause;
8155         u8 flowctl;
8156
8157         if (!phydev->link || !phydev->autoneg)
8158                 return 0;
8159
8160         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8161
8162         if (phydev->pause)
8163                 remote_advertising = LPA_PAUSE_CAP;
8164
8165         if (phydev->asym_pause)
8166                 remote_advertising |= LPA_PAUSE_ASYM;
8167
8168         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8169                                            remote_advertising);
8170         tx_pause = flowctl & FLOW_CTRL_TX;
8171         rx_pause = flowctl & FLOW_CTRL_RX;
8172
8173         if (phydev->duplex == HCLGE_MAC_HALF) {
8174                 tx_pause = 0;
8175                 rx_pause = 0;
8176         }
8177
8178         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8179 }
8180
8181 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8182                                  u32 *rx_en, u32 *tx_en)
8183 {
8184         struct hclge_vport *vport = hclge_get_vport(handle);
8185         struct hclge_dev *hdev = vport->back;
8186         struct phy_device *phydev = hdev->hw.mac.phydev;
8187
8188         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8189
8190         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8191                 *rx_en = 0;
8192                 *tx_en = 0;
8193                 return;
8194         }
8195
8196         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8197                 *rx_en = 1;
8198                 *tx_en = 0;
8199         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8200                 *tx_en = 1;
8201                 *rx_en = 0;
8202         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8203                 *rx_en = 1;
8204                 *tx_en = 1;
8205         } else {
8206                 *rx_en = 0;
8207                 *tx_en = 0;
8208         }
8209 }
8210
8211 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8212                                 u32 rx_en, u32 tx_en)
8213 {
8214         struct hclge_vport *vport = hclge_get_vport(handle);
8215         struct hclge_dev *hdev = vport->back;
8216         struct phy_device *phydev = hdev->hw.mac.phydev;
8217         u32 fc_autoneg;
8218
8219         if (phydev) {
8220                 fc_autoneg = hclge_get_autoneg(handle);
8221                 if (auto_neg != fc_autoneg) {
8222                         dev_info(&hdev->pdev->dev,
8223                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8224                         return -EOPNOTSUPP;
8225                 }
8226         }
8227
8228         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8229                 dev_info(&hdev->pdev->dev,
8230                          "Priority flow control enabled. Cannot set link flow control.\n");
8231                 return -EOPNOTSUPP;
8232         }
8233
8234         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8235
8236         if (!auto_neg)
8237                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8238
8239         if (phydev)
8240                 return phy_start_aneg(phydev);
8241
8242         return -EOPNOTSUPP;
8243 }
8244
8245 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8246                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8247 {
8248         struct hclge_vport *vport = hclge_get_vport(handle);
8249         struct hclge_dev *hdev = vport->back;
8250
8251         if (speed)
8252                 *speed = hdev->hw.mac.speed;
8253         if (duplex)
8254                 *duplex = hdev->hw.mac.duplex;
8255         if (auto_neg)
8256                 *auto_neg = hdev->hw.mac.autoneg;
8257 }
8258
8259 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8260                                  u8 *module_type)
8261 {
8262         struct hclge_vport *vport = hclge_get_vport(handle);
8263         struct hclge_dev *hdev = vport->back;
8264
8265         if (media_type)
8266                 *media_type = hdev->hw.mac.media_type;
8267
8268         if (module_type)
8269                 *module_type = hdev->hw.mac.module_type;
8270 }
8271
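/* Read the MDI-X control and status from the PHY: select the MDIX page,
 * sample the control/status registers, switch back to the copper page and
 * translate the raw fields into ETH_TP_MDI* values.
 */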
8272 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8273                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8274 {
8275         struct hclge_vport *vport = hclge_get_vport(handle);
8276         struct hclge_dev *hdev = vport->back;
8277         struct phy_device *phydev = hdev->hw.mac.phydev;
8278         int mdix_ctrl, mdix, is_resolved;
8279         unsigned int retval;
8280
8281         if (!phydev) {
8282                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8283                 *tp_mdix = ETH_TP_MDI_INVALID;
8284                 return;
8285         }
8286
8287         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8288
8289         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8290         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8291                                     HCLGE_PHY_MDIX_CTRL_S);
8292
8293         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8294         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8295         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8296
8297         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8298
8299         switch (mdix_ctrl) {
8300         case 0x0:
8301                 *tp_mdix_ctrl = ETH_TP_MDI;
8302                 break;
8303         case 0x1:
8304                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8305                 break;
8306         case 0x3:
8307                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8308                 break;
8309         default:
8310                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8311                 break;
8312         }
8313
8314         if (!is_resolved)
8315                 *tp_mdix = ETH_TP_MDI_INVALID;
8316         else if (mdix)
8317                 *tp_mdix = ETH_TP_MDI_X;
8318         else
8319                 *tp_mdix = ETH_TP_MDI;
8320 }
8321
8322 static void hclge_info_show(struct hclge_dev *hdev)
8323 {
8324         struct device *dev = &hdev->pdev->dev;
8325
8326         dev_info(dev, "PF info begin:\n");
8327
8328         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8329         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8330         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8331         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8332         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8333         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8334         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8335         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8336         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8337         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8338         dev_info(dev, "This is %s PF\n",
8339                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8340         dev_info(dev, "DCB %s\n",
8341                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8342         dev_info(dev, "MQPRIO %s\n",
8343                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8344
8345         dev_info(dev, "PF info end.\n");
8346 }
8347
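/* Initialize the NIC client instance for a vport. The reset counter is
 * sampled before init_instance(); if a reset was handled in between (the
 * counter moved, or a reset is still in progress), the instance is torn
 * down again and -EBUSY is returned so the caller can retry later.
 */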
8348 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8349                                           struct hclge_vport *vport)
8350 {
8351         struct hnae3_client *client = vport->nic.client;
8352         struct hclge_dev *hdev = ae_dev->priv;
8353         int rst_cnt;
8354         int ret;
8355
8356         rst_cnt = hdev->rst_stats.reset_cnt;
8357         ret = client->ops->init_instance(&vport->nic);
8358         if (ret)
8359                 return ret;
8360
8361         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8362         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8363             rst_cnt != hdev->rst_stats.reset_cnt) {
8364                 ret = -EBUSY;
8365                 goto init_nic_err;
8366         }
8367
8368         /* Enable nic hw error interrupts */
8369         ret = hclge_config_nic_hw_error(hdev, true);
8370         if (ret) {
8371                 dev_err(&ae_dev->pdev->dev,
8372                         "fail(%d) to enable hw error interrupts\n", ret);
8373                 goto init_nic_err;
8374         }
8375
8376         hnae3_set_client_init_flag(client, ae_dev, 1);
8377
8378         if (netif_msg_drv(&hdev->vport->nic))
8379                 hclge_info_show(hdev);
8380
8381         return ret;
8382
8383 init_nic_err:
8384         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8385         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8386                 msleep(HCLGE_WAIT_RESET_DONE);
8387
8388         client->ops->uninit_instance(&vport->nic, 0);
8389
8390         return ret;
8391 }
8392
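/* Initialize the RoCE client instance for a vport. This is a no-op until
 * both the NIC and RoCE clients are registered and the device supports
 * RoCE; it uses the same reset-race detection as the NIC init above.
 */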
8393 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8394                                            struct hclge_vport *vport)
8395 {
8396         struct hnae3_client *client = vport->roce.client;
8397         struct hclge_dev *hdev = ae_dev->priv;
8398         int rst_cnt;
8399         int ret;
8400
8401         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8402             !hdev->nic_client)
8403                 return 0;
8404
8405         client = hdev->roce_client;
8406         ret = hclge_init_roce_base_info(vport);
8407         if (ret)
8408                 return ret;
8409
8410         rst_cnt = hdev->rst_stats.reset_cnt;
8411         ret = client->ops->init_instance(&vport->roce);
8412         if (ret)
8413                 return ret;
8414
8415         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8416         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8417             rst_cnt != hdev->rst_stats.reset_cnt) {
8418                 ret = -EBUSY;
8419                 goto init_roce_err;
8420         }
8421
8422         /* Enable roce ras interrupts */
8423         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8424         if (ret) {
8425                 dev_err(&ae_dev->pdev->dev,
8426                         "fail(%d) to enable roce ras interrupts\n", ret);
8427                 goto init_roce_err;
8428         }
8429
8430         hnae3_set_client_init_flag(client, ae_dev, 1);
8431
8432         return 0;
8433
8434 init_roce_err:
8435         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8436         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8437                 msleep(HCLGE_WAIT_RESET_DONE);
8438
8439         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8440
8441         return ret;
8442 }
8443
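/* hnae3 framework hook: bind a newly registered client (KNIC or RoCE) to
 * every vport of this PF (the main vport plus any VMDq vports).
 */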
8444 static int hclge_init_client_instance(struct hnae3_client *client,
8445                                       struct hnae3_ae_dev *ae_dev)
8446 {
8447         struct hclge_dev *hdev = ae_dev->priv;
8448         struct hclge_vport *vport;
8449         int i, ret;
8450
8451         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8452                 vport = &hdev->vport[i];
8453
8454                 switch (client->type) {
8455                 case HNAE3_CLIENT_KNIC:
8456
8457                         hdev->nic_client = client;
8458                         vport->nic.client = client;
8459                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8460                         if (ret)
8461                                 goto clear_nic;
8462
8463                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8464                         if (ret)
8465                                 goto clear_roce;
8466
8467                         break;
8468                 case HNAE3_CLIENT_ROCE:
8469                         if (hnae3_dev_roce_supported(hdev)) {
8470                                 hdev->roce_client = client;
8471                                 vport->roce.client = client;
8472                         }
8473
8474                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8475                         if (ret)
8476                                 goto clear_roce;
8477
8478                         break;
8479                 default:
8480                         return -EINVAL;
8481                 }
8482         }
8483
8484         return 0;
8485
8486 clear_nic:
8487         hdev->nic_client = NULL;
8488         vport->nic.client = NULL;
8489         return ret;
8490 clear_roce:
8491         hdev->roce_client = NULL;
8492         vport->roce.client = NULL;
8493         return ret;
8494 }
8495
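/* hnae3 framework hook: unbind a client from every vport. The RoCE
 * instance is always torn down first since it depends on the NIC client,
 * and any reset in progress is waited out before uninit_instance().
 */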
8496 static void hclge_uninit_client_instance(struct hnae3_client *client,
8497                                          struct hnae3_ae_dev *ae_dev)
8498 {
8499         struct hclge_dev *hdev = ae_dev->priv;
8500         struct hclge_vport *vport;
8501         int i;
8502
8503         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8504                 vport = &hdev->vport[i];
8505                 if (hdev->roce_client) {
8506                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8507                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8508                                 msleep(HCLGE_WAIT_RESET_DONE);
8509
8510                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8511                                                                 0);
8512                         hdev->roce_client = NULL;
8513                         vport->roce.client = NULL;
8514                 }
8515                 if (client->type == HNAE3_CLIENT_ROCE)
8516                         return;
8517                 if (hdev->nic_client && client->ops->uninit_instance) {
8518                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8519                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8520                                 msleep(HCLGE_WAIT_RESET_DONE);
8521
8522                         client->ops->uninit_instance(&vport->nic, 0);
8523                         hdev->nic_client = NULL;
8524                         vport->nic.client = NULL;
8525                 }
8526         }
8527 }
8528
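/* Bring up the PCI device: enable it, prefer a 64-bit DMA mask with a
 * 32-bit fallback, claim the regions and map BAR2 as the hardware
 * register window.
 */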
8529 static int hclge_pci_init(struct hclge_dev *hdev)
8530 {
8531         struct pci_dev *pdev = hdev->pdev;
8532         struct hclge_hw *hw;
8533         int ret;
8534
8535         ret = pci_enable_device(pdev);
8536         if (ret) {
8537                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8538                 return ret;
8539         }
8540
8541         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8542         if (ret) {
8543                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8544                 if (ret) {
8545                         dev_err(&pdev->dev,
8546                                 "can't set consistent PCI DMA\n");
8547                         goto err_disable_device;
8548                 }
8549                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8550         }
8551
8552         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8553         if (ret) {
8554                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8555                 goto err_disable_device;
8556         }
8557
8558         pci_set_master(pdev);
8559         hw = &hdev->hw;
8560         hw->io_base = pcim_iomap(pdev, 2, 0);
8561         if (!hw->io_base) {
8562                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8563                 ret = -ENOMEM;
8564                 goto err_clr_master;
8565         }
8566
8567         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8568
8569         return 0;
8570 err_clr_master:
8571         pci_clear_master(pdev);
8572         pci_release_regions(pdev);
8573 err_disable_device:
8574         pci_disable_device(pdev);
8575
8576         return ret;
8577 }
8578
8579 static void hclge_pci_uninit(struct hclge_dev *hdev)
8580 {
8581         struct pci_dev *pdev = hdev->pdev;
8582
8583         pcim_iounmap(pdev, hdev->hw.io_base);
8584         pci_free_irq_vectors(pdev);
8585         pci_clear_master(pdev);
8586         pci_release_mem_regions(pdev);
8587         pci_disable_device(pdev);
8588 }
8589
8590 static void hclge_state_init(struct hclge_dev *hdev)
8591 {
8592         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8593         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8594         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8595         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8596         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8597         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8598 }
8599
8600 static void hclge_state_uninit(struct hclge_dev *hdev)
8601 {
8602         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8603         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8604
8605         if (hdev->service_timer.function)
8606                 del_timer_sync(&hdev->service_timer);
8607         if (hdev->reset_timer.function)
8608                 del_timer_sync(&hdev->reset_timer);
8609         if (hdev->service_task.func)
8610                 cancel_work_sync(&hdev->service_task);
8611         if (hdev->rst_service_task.func)
8612                 cancel_work_sync(&hdev->rst_service_task);
8613         if (hdev->mbx_service_task.func)
8614                 cancel_work_sync(&hdev->mbx_service_task);
8615 }
8616
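/* Prepare for an FLR: request a function-level reset and poll for the
 * stack to go down, waiting at most HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS
 * (50 * 100 ms = 5 s) before logging a timeout.
 */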
8617 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8618 {
8619 #define HCLGE_FLR_WAIT_MS       100
8620 #define HCLGE_FLR_WAIT_CNT      50
8621         struct hclge_dev *hdev = ae_dev->priv;
8622         int cnt = 0;
8623
8624         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8625         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8626         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8627         hclge_reset_event(hdev->pdev, NULL);
8628
8629         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8630                cnt++ < HCLGE_FLR_WAIT_CNT)
8631                 msleep(HCLGE_FLR_WAIT_MS);
8632
8633         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8634                 dev_err(&hdev->pdev->dev,
8635                         "flr wait down timeout: %d\n", cnt);
8636 }
8637
8638 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8639 {
8640         struct hclge_dev *hdev = ae_dev->priv;
8641
8642         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8643 }
8644
8645 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8646 {
8647         u16 i;
8648
8649         for (i = 0; i < hdev->num_alloc_vport; i++) {
8650                 struct hclge_vport *vport = &hdev->vport[i];
8651                 int ret;
8652
8653                 /* Send cmd to clear VF's FUNC_RST_ING */
8654                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8655                 if (ret)
8656                         dev_warn(&hdev->pdev->dev,
8657                                  "clear vf(%d) rst failed %d!\n",
8658                                  vport->vport_id, ret);
8659         }
8660 }
8661
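/* Main PF probe path. Roughly: PCI and command queue bring-up, capability
 * query and configuration, MSI/MSI-X and the misc (vector0) interrupt,
 * TQP/vport allocation and mapping, optional MDIO for copper ports, then
 * MAC, TSO/GRO, VLAN, TM scheduler, RSS, manager table and flow director
 * setup, followed by service/reset task and timer initialization.
 */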
8662 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8663 {
8664         struct pci_dev *pdev = ae_dev->pdev;
8665         struct hclge_dev *hdev;
8666         int ret;
8667
8668         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8669         if (!hdev) {
8670                 ret = -ENOMEM;
8671                 goto out;
8672         }
8673
8674         hdev->pdev = pdev;
8675         hdev->ae_dev = ae_dev;
8676         hdev->reset_type = HNAE3_NONE_RESET;
8677         hdev->reset_level = HNAE3_FUNC_RESET;
8678         ae_dev->priv = hdev;
8679         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8680
8681         mutex_init(&hdev->vport_lock);
8682         mutex_init(&hdev->vport_cfg_mutex);
8683         spin_lock_init(&hdev->fd_rule_lock);
8684
8685         ret = hclge_pci_init(hdev);
8686         if (ret) {
8687                 dev_err(&pdev->dev, "PCI init failed\n");
8688                 goto out;
8689         }
8690
8691         /* Initialize the firmware command queue */
8692         ret = hclge_cmd_queue_init(hdev);
8693         if (ret) {
8694                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8695                 goto err_pci_uninit;
8696         }
8697
8698         /* Initialize the firmware command */
8699         ret = hclge_cmd_init(hdev);
8700         if (ret)
8701                 goto err_cmd_uninit;
8702
8703         ret = hclge_get_cap(hdev);
8704         if (ret) {
8705                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8706                         ret);
8707                 goto err_cmd_uninit;
8708         }
8709
8710         ret = hclge_configure(hdev);
8711         if (ret) {
8712                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8713                 goto err_cmd_uninit;
8714         }
8715
8716         ret = hclge_init_msi(hdev);
8717         if (ret) {
8718                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8719                 goto err_cmd_uninit;
8720         }
8721
8722         ret = hclge_misc_irq_init(hdev);
8723         if (ret) {
8724                 dev_err(&pdev->dev,
8725                         "Misc IRQ(vector0) init error, ret = %d.\n",
8726                         ret);
8727                 goto err_msi_uninit;
8728         }
8729
8730         ret = hclge_alloc_tqps(hdev);
8731         if (ret) {
8732                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8733                 goto err_msi_irq_uninit;
8734         }
8735
8736         ret = hclge_alloc_vport(hdev);
8737         if (ret) {
8738                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8739                 goto err_msi_irq_uninit;
8740         }
8741
8742         ret = hclge_map_tqp(hdev);
8743         if (ret) {
8744                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8745                 goto err_msi_irq_uninit;
8746         }
8747
8748         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8749                 ret = hclge_mac_mdio_config(hdev);
8750                 if (ret) {
8751                         dev_err(&hdev->pdev->dev,
8752                                 "mdio config fail ret=%d\n", ret);
8753                         goto err_msi_irq_uninit;
8754                 }
8755         }
8756
8757         ret = hclge_init_umv_space(hdev);
8758         if (ret) {
8759                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8760                 goto err_mdiobus_unreg;
8761         }
8762
8763         ret = hclge_mac_init(hdev);
8764         if (ret) {
8765                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8766                 goto err_mdiobus_unreg;
8767         }
8768
8769         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8770         if (ret) {
8771                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8772                 goto err_mdiobus_unreg;
8773         }
8774
8775         ret = hclge_config_gro(hdev, true);
8776         if (ret)
8777                 goto err_mdiobus_unreg;
8778
8779         ret = hclge_init_vlan_config(hdev);
8780         if (ret) {
8781                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8782                 goto err_mdiobus_unreg;
8783         }
8784
8785         ret = hclge_tm_schd_init(hdev);
8786         if (ret) {
8787                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8788                 goto err_mdiobus_unreg;
8789         }
8790
8791         hclge_rss_init_cfg(hdev);
8792         ret = hclge_rss_init_hw(hdev);
8793         if (ret) {
8794                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8795                 goto err_mdiobus_unreg;
8796         }
8797
8798         ret = init_mgr_tbl(hdev);
8799         if (ret) {
8800                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8801                 goto err_mdiobus_unreg;
8802         }
8803
8804         ret = hclge_init_fd_config(hdev);
8805         if (ret) {
8806                 dev_err(&pdev->dev,
8807                         "fd table init fail, ret=%d\n", ret);
8808                 goto err_mdiobus_unreg;
8809         }
8810
8811         INIT_KFIFO(hdev->mac_tnl_log);
8812
8813         hclge_dcb_ops_set(hdev);
8814
8815         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8816         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8817         INIT_WORK(&hdev->service_task, hclge_service_task);
8818         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8819         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8820
8821         hclge_clear_all_event_cause(hdev);
8822         hclge_clear_resetting_state(hdev);
8823
8824         /* Log and clear the hw errors that have already occurred */
8825         hclge_handle_all_hns_hw_errors(ae_dev);
8826
8827         /* Request a delayed reset for error recovery: an immediate global
8828          * reset on this PF could affect pending initialization of other PFs.
8829          */
8830         if (ae_dev->hw_err_reset_req) {
8831                 enum hnae3_reset_type reset_level;
8832
8833                 reset_level = hclge_get_reset_level(ae_dev,
8834                                                     &ae_dev->hw_err_reset_req);
8835                 hclge_set_def_reset_request(ae_dev, reset_level);
8836                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8837         }
8838
8839         /* Enable MISC vector(vector0) */
8840         hclge_enable_vector(&hdev->misc_vector, true);
8841
8842         hclge_state_init(hdev);
8843         hdev->last_reset_time = jiffies;
8844
8845         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8846         return 0;
8847
8848 err_mdiobus_unreg:
8849         if (hdev->hw.mac.phydev)
8850                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8851 err_msi_irq_uninit:
8852         hclge_misc_irq_uninit(hdev);
8853 err_msi_uninit:
8854         pci_free_irq_vectors(pdev);
8855 err_cmd_uninit:
8856         hclge_cmd_uninit(hdev);
8857 err_pci_uninit:
8858         pcim_iounmap(pdev, hdev->hw.io_base);
8859         pci_clear_master(pdev);
8860         pci_release_regions(pdev);
8861         pci_disable_device(pdev);
8862 out:
8863         return ret;
8864 }
8865
8866 static void hclge_stats_clear(struct hclge_dev *hdev)
8867 {
8868         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8869 }
8870
8871 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8872 {
8873         struct hclge_vport *vport = hdev->vport;
8874         int i;
8875
8876         for (i = 0; i < hdev->num_alloc_vport; i++) {
8877                 hclge_vport_stop(vport);
8878                 vport++;
8879         }
8880 }
8881
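/* Re-initialization path run after a reset. Unlike hclge_init_ae_dev(),
 * it reuses the existing allocations and only reprograms the hardware
 * state (command queue, TQP mapping, MAC, VLAN, TM, RSS, flow director),
 * then re-enables the hw error interrupts that the reset disabled.
 */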
8882 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8883 {
8884         struct hclge_dev *hdev = ae_dev->priv;
8885         struct pci_dev *pdev = ae_dev->pdev;
8886         int ret;
8887
8888         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8889
8890         hclge_stats_clear(hdev);
8891         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8892         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8893
8894         ret = hclge_cmd_init(hdev);
8895         if (ret) {
8896                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8897                 return ret;
8898         }
8899
8900         ret = hclge_map_tqp(hdev);
8901         if (ret) {
8902                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8903                 return ret;
8904         }
8905
8906         hclge_reset_umv_space(hdev);
8907
8908         ret = hclge_mac_init(hdev);
8909         if (ret) {
8910                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8911                 return ret;
8912         }
8913
8914         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8915         if (ret) {
8916                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8917                 return ret;
8918         }
8919
8920         ret = hclge_config_gro(hdev, true);
8921         if (ret)
8922                 return ret;
8923
8924         ret = hclge_init_vlan_config(hdev);
8925         if (ret) {
8926                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8927                 return ret;
8928         }
8929
8930         ret = hclge_tm_init_hw(hdev, true);
8931         if (ret) {
8932                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8933                 return ret;
8934         }
8935
8936         ret = hclge_rss_init_hw(hdev);
8937         if (ret) {
8938                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8939                 return ret;
8940         }
8941
8942         ret = hclge_init_fd_config(hdev);
8943         if (ret) {
8944                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8945                 return ret;
8946         }
8947
8948         /* Re-enable the hw error interrupts because
8949          * the interrupts get disabled on global reset.
8950          */
8951         ret = hclge_config_nic_hw_error(hdev, true);
8952         if (ret) {
8953                 dev_err(&pdev->dev,
8954                         "fail(%d) to re-enable NIC hw error interrupts\n",
8955                         ret);
8956                 return ret;
8957         }
8958
8959         if (hdev->roce_client) {
8960                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8961                 if (ret) {
8962                         dev_err(&pdev->dev,
8963                                 "fail(%d) to re-enable roce ras interrupts\n",
8964                                 ret);
8965                         return ret;
8966                 }
8967         }
8968
8969         hclge_reset_vport_state(hdev);
8970
8971         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8972                  HCLGE_DRIVER_NAME);
8973
8974         return 0;
8975 }
8976
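/* PF remove path: stop the service tasks, then quiesce interrupts (disable
 * vector0, synchronize the IRQ, mask the hw error sources) before tearing
 * down the command queue and PCI resources.
 */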
8977 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8978 {
8979         struct hclge_dev *hdev = ae_dev->priv;
8980         struct hclge_mac *mac = &hdev->hw.mac;
8981
8982         hclge_state_uninit(hdev);
8983
8984         if (mac->phydev)
8985                 mdiobus_unregister(mac->mdio_bus);
8986
8987         hclge_uninit_umv_space(hdev);
8988
8989         /* Disable MISC vector(vector0) */
8990         hclge_enable_vector(&hdev->misc_vector, false);
8991         synchronize_irq(hdev->misc_vector.vector_irq);
8992
8993         /* Disable all hw interrupts */
8994         hclge_config_mac_tnl_int(hdev, false);
8995         hclge_config_nic_hw_error(hdev, false);
8996         hclge_config_rocee_ras_interrupt(hdev, false);
8997
8998         hclge_cmd_uninit(hdev);
8999         hclge_misc_irq_uninit(hdev);
9000         hclge_pci_uninit(hdev);
9001         mutex_destroy(&hdev->vport_lock);
9002         hclge_uninit_vport_mac_table(hdev);
9003         hclge_uninit_vport_vlan_table(hdev);
9004         mutex_destroy(&hdev->vport_cfg_mutex);
9005         ae_dev->priv = NULL;
9006 }
9007
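/* Maximum combined channels reported to ethtool: bounded by both the RSS
 * size the hardware supports and the TQPs available per TC on this vport.
 */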
9008 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9009 {
9010         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9011         struct hclge_vport *vport = hclge_get_vport(handle);
9012         struct hclge_dev *hdev = vport->back;
9013
9014         return min_t(u32, hdev->rss_size_max,
9015                      vport->alloc_tqps / kinfo->num_tc);
9016 }
9017
9018 static void hclge_get_channels(struct hnae3_handle *handle,
9019                                struct ethtool_channels *ch)
9020 {
9021         ch->max_combined = hclge_get_max_channels(handle);
9022         ch->other_count = 1;
9023         ch->max_other = 1;
9024         ch->combined_count = handle->kinfo.rss_size;
9025 }
9026
9027 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9028                                         u16 *alloc_tqps, u16 *max_rss_size)
9029 {
9030         struct hclge_vport *vport = hclge_get_vport(handle);
9031         struct hclge_dev *hdev = vport->back;
9032
9033         *alloc_tqps = vport->alloc_tqps;
9034         *max_rss_size = hdev->rss_size_max;
9035 }
9036
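/* ethtool -L handler. After the TM layer recomputes the vport mapping,
 * the RSS TC mode is reprogrammed with the per-TC size encoded as a
 * power-of-two exponent: e.g. a new rss_size of 6 rounds up to 8, so
 * tc_size becomes ilog2(8) = 3 and tc_offset steps by 6 per enabled TC.
 * The RSS indirection table is only rebuilt when the user has not
 * configured one explicitly (rxfh_configured).
 */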
9037 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9038                               bool rxfh_configured)
9039 {
9040         struct hclge_vport *vport = hclge_get_vport(handle);
9041         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9042         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9043         struct hclge_dev *hdev = vport->back;
9044         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9045         int cur_rss_size = kinfo->rss_size;
9046         int cur_tqps = kinfo->num_tqps;
9047         u16 tc_valid[HCLGE_MAX_TC_NUM];
9048         u16 roundup_size;
9049         u32 *rss_indir;
9050         unsigned int i;
9051         int ret;
9052
9053         kinfo->req_rss_size = new_tqps_num;
9054
9055         ret = hclge_tm_vport_map_update(hdev);
9056         if (ret) {
9057                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9058                 return ret;
9059         }
9060
9061         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9062         roundup_size = ilog2(roundup_size);
9063         /* Set the RSS TC mode according to the new RSS size */
9064         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9065                 tc_valid[i] = 0;
9066
9067                 if (!(hdev->hw_tc_map & BIT(i)))
9068                         continue;
9069
9070                 tc_valid[i] = 1;
9071                 tc_size[i] = roundup_size;
9072                 tc_offset[i] = kinfo->rss_size * i;
9073         }
9074         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9075         if (ret)
9076                 return ret;
9077
9078         /* RSS indirection table has been configured by user */
9079         if (rxfh_configured)
9080                 goto out;
9081
9082         /* Reinitialize the RSS indirection table according to the new RSS size */
9083         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9084         if (!rss_indir)
9085                 return -ENOMEM;
9086
9087         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9088                 rss_indir[i] = i % kinfo->rss_size;
9089
9090         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9091         if (ret)
9092                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9093                         ret);
9094
9095         kfree(rss_indir);
9096
9097 out:
9098         if (!ret)
9099                 dev_info(&hdev->pdev->dev,
9100                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9101                          cur_rss_size, kinfo->rss_size,
9102                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9103
9104         return ret;
9105 }
9106
9107 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9108                               u32 *regs_num_64_bit)
9109 {
9110         struct hclge_desc desc;
9111         u32 total_num;
9112         int ret;
9113
9114         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9115         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9116         if (ret) {
9117                 dev_err(&hdev->pdev->dev,
9118                         "Query register number cmd failed, ret = %d.\n", ret);
9119                 return ret;
9120         }
9121
9122         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9123         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9124
9125         total_num = *regs_num_32_bit + *regs_num_64_bit;
9126         if (!total_num)
9127                 return -EINVAL;
9128
9129         return 0;
9130 }
9131
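/* Read the firmware-owned 32-bit registers. The first descriptor of the
 * reply carries HCLGE_32_BIT_REG_RTN_DATANUM - HCLGE_32_BIT_DESC_NODATA_LEN
 * = 6 register values (its first two words carry no register data); each
 * remaining descriptor is consumed whole, 8 values apiece.
 */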
9132 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9133                                  void *data)
9134 {
9135 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9136 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9137
9138         struct hclge_desc *desc;
9139         u32 *reg_val = data;
9140         __le32 *desc_data;
9141         int nodata_num;
9142         int cmd_num;
9143         int i, k, n;
9144         int ret;
9145
9146         if (regs_num == 0)
9147                 return 0;
9148
9149         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9150         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9151                                HCLGE_32_BIT_REG_RTN_DATANUM);
9152         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9153         if (!desc)
9154                 return -ENOMEM;
9155
9156         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9157         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9158         if (ret) {
9159                 dev_err(&hdev->pdev->dev,
9160                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
9161                 kfree(desc);
9162                 return ret;
9163         }
9164
9165         for (i = 0; i < cmd_num; i++) {
9166                 if (i == 0) {
9167                         desc_data = (__le32 *)(&desc[i].data[0]);
9168                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9169                 } else {
9170                         desc_data = (__le32 *)(&desc[i]);
9171                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
9172                 }
9173                 for (k = 0; k < n; k++) {
9174                         *reg_val++ = le32_to_cpu(*desc_data++);
9175
9176                         regs_num--;
9177                         if (!regs_num)
9178                                 break;
9179                 }
9180         }
9181
9182         kfree(desc);
9183         return 0;
9184 }
9185
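/* 64-bit counterpart of the query above: the first descriptor carries
 * 4 - 1 = 3 register values (one 64-bit word carries no data), later
 * descriptors carry 4 each.
 */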
9186 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9187                                  void *data)
9188 {
9189 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9190 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9191
9192         struct hclge_desc *desc;
9193         u64 *reg_val = data;
9194         __le64 *desc_data;
9195         int nodata_len;
9196         int cmd_num;
9197         int i, k, n;
9198         int ret;
9199
9200         if (regs_num == 0)
9201                 return 0;
9202
9203         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9204         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9205                                HCLGE_64_BIT_REG_RTN_DATANUM);
9206         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9207         if (!desc)
9208                 return -ENOMEM;
9209
9210         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9211         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9212         if (ret) {
9213                 dev_err(&hdev->pdev->dev,
9214                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9215                 kfree(desc);
9216                 return ret;
9217         }
9218
9219         for (i = 0; i < cmd_num; i++) {
9220                 if (i == 0) {
9221                         desc_data = (__le64 *)(&desc[i].data[0]);
9222                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9223                 } else {
9224                         desc_data = (__le64 *)(&desc[i]);
9225                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9226                 }
9227                 for (k = 0; k < n; k++) {
9228                         *reg_val++ = le64_to_cpu(*desc_data++);
9229
9230                         regs_num--;
9231                         if (!regs_num)
9232                                 break;
9233                 }
9234         }
9235
9236         kfree(desc);
9237         return 0;
9238 }
9239
9240 #define MAX_SEPARATE_NUM        4
9241 #define SEPARATOR_VALUE         0xFFFFFFFF
9242 #define REG_NUM_PER_LINE        4
9243 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9244
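/* Total ethtool register-dump length. Each directly read block (cmdq,
 * common, per-ring, per-vector) is laid out in lines of four u32s and
 * padded with separator words up to a full extra line, e.g. a 12-entry
 * list occupies 12/4 + 1 = 4 lines; the firmware-queried 32/64-bit
 * registers are appended unpadded.
 */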
9245 static int hclge_get_regs_len(struct hnae3_handle *handle)
9246 {
9247         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9248         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9249         struct hclge_vport *vport = hclge_get_vport(handle);
9250         struct hclge_dev *hdev = vport->back;
9251         u32 regs_num_32_bit, regs_num_64_bit;
9252         int ret;
9253
9254         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9255         if (ret) {
9256                 dev_err(&hdev->pdev->dev,
9257                         "Get register number failed, ret = %d.\n", ret);
9258                 return -EOPNOTSUPP;
9259         }
9260
9261         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9262         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9263         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9264         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9265
9266         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9267                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9268                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9269 }
9270
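/* Fill the ethtool register dump in the same layout hclge_get_regs_len()
 * sized: each direct block is followed by SEPARATOR_VALUE padding, ring
 * registers are read at a 0x200 stride per TQP and interrupt registers at
 * a 4-byte stride per vector, then the firmware-queried registers follow.
 */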
9271 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9272                            void *data)
9273 {
9274         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9275         struct hclge_vport *vport = hclge_get_vport(handle);
9276         struct hclge_dev *hdev = vport->back;
9277         u32 regs_num_32_bit, regs_num_64_bit;
9278         int i, j, reg_um, separator_num;
9279         u32 *reg = data;
9280         int ret;
9281
9282         *version = hdev->fw_version;
9283
9284         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9285         if (ret) {
9286                 dev_err(&hdev->pdev->dev,
9287                         "Get register number failed, ret = %d.\n", ret);
9288                 return;
9289         }
9290
9291         /* fetch per-PF register values from the PF PCIe register space */
9292         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9293         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9294         for (i = 0; i < reg_um; i++)
9295                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9296         for (i = 0; i < separator_num; i++)
9297                 *reg++ = SEPARATOR_VALUE;
9298
9299         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9300         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9301         for (i = 0; i < reg_um; i++)
9302                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9303         for (i = 0; i < separator_num; i++)
9304                 *reg++ = SEPARATOR_VALUE;
9305
9306         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9307         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9308         for (j = 0; j < kinfo->num_tqps; j++) {
9309                 for (i = 0; i < reg_um; i++)
9310                         *reg++ = hclge_read_dev(&hdev->hw,
9311                                                 ring_reg_addr_list[i] +
9312                                                 0x200 * j);
9313                 for (i = 0; i < separator_num; i++)
9314                         *reg++ = SEPARATOR_VALUE;
9315         }
9316
9317         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9318         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9319         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9320                 for (i = 0; i < reg_um; i++)
9321                         *reg++ = hclge_read_dev(&hdev->hw,
9322                                                 tqp_intr_reg_addr_list[i] +
9323                                                 4 * j);
9324                 for (i = 0; i < separator_num; i++)
9325                         *reg++ = SEPARATOR_VALUE;
9326         }
9327
9328         /* fetch PF common register values from firmware */
9329         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9330         if (ret) {
9331                 dev_err(&hdev->pdev->dev,
9332                         "Get 32 bit register failed, ret = %d.\n", ret);
9333                 return;
9334         }
9335
9336         reg += regs_num_32_bit;
9337         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9338         if (ret)
9339                 dev_err(&hdev->pdev->dev,
9340                         "Get 64 bit register failed, ret = %d.\n", ret);
9341 }
9342
9343 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9344 {
9345         struct hclge_set_led_state_cmd *req;
9346         struct hclge_desc desc;
9347         int ret;
9348
9349         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9350
9351         req = (struct hclge_set_led_state_cmd *)desc.data;
9352         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9353                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9354
9355         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9356         if (ret)
9357                 dev_err(&hdev->pdev->dev,
9358                         "Send set led state cmd error, ret =%d\n", ret);
9359
9360         return ret;
9361 }
9362
9363 enum hclge_led_status {
9364         HCLGE_LED_OFF,
9365         HCLGE_LED_ON,
9366         HCLGE_LED_NO_CHANGE = 0xFF,
9367 };
9368
9369 static int hclge_set_led_id(struct hnae3_handle *handle,
9370                             enum ethtool_phys_id_state status)
9371 {
9372         struct hclge_vport *vport = hclge_get_vport(handle);
9373         struct hclge_dev *hdev = vport->back;
9374
9375         switch (status) {
9376         case ETHTOOL_ID_ACTIVE:
9377                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9378         case ETHTOOL_ID_INACTIVE:
9379                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9380         default:
9381                 return -EINVAL;
9382         }
9383 }
9384
9385 static void hclge_get_link_mode(struct hnae3_handle *handle,
9386                                 unsigned long *supported,
9387                                 unsigned long *advertising)
9388 {
9389         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9390         struct hclge_vport *vport = hclge_get_vport(handle);
9391         struct hclge_dev *hdev = vport->back;
9392         unsigned int idx = 0;
9393
9394         for (; idx < size; idx++) {
9395                 supported[idx] = hdev->hw.mac.supported[idx];
9396                 advertising[idx] = hdev->hw.mac.advertising[idx];
9397         }
9398 }
9399
9400 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9401 {
9402         struct hclge_vport *vport = hclge_get_vport(handle);
9403         struct hclge_dev *hdev = vport->back;
9404
9405         return hclge_config_gro(hdev, enable);
9406 }
9407
9408 static const struct hnae3_ae_ops hclge_ops = {
9409         .init_ae_dev = hclge_init_ae_dev,
9410         .uninit_ae_dev = hclge_uninit_ae_dev,
9411         .flr_prepare = hclge_flr_prepare,
9412         .flr_done = hclge_flr_done,
9413         .init_client_instance = hclge_init_client_instance,
9414         .uninit_client_instance = hclge_uninit_client_instance,
9415         .map_ring_to_vector = hclge_map_ring_to_vector,
9416         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9417         .get_vector = hclge_get_vector,
9418         .put_vector = hclge_put_vector,
9419         .set_promisc_mode = hclge_set_promisc_mode,
9420         .set_loopback = hclge_set_loopback,
9421         .start = hclge_ae_start,
9422         .stop = hclge_ae_stop,
9423         .client_start = hclge_client_start,
9424         .client_stop = hclge_client_stop,
9425         .get_status = hclge_get_status,
9426         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9427         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9428         .get_media_type = hclge_get_media_type,
9429         .check_port_speed = hclge_check_port_speed,
9430         .get_fec = hclge_get_fec,
9431         .set_fec = hclge_set_fec,
9432         .get_rss_key_size = hclge_get_rss_key_size,
9433         .get_rss_indir_size = hclge_get_rss_indir_size,
9434         .get_rss = hclge_get_rss,
9435         .set_rss = hclge_set_rss,
9436         .set_rss_tuple = hclge_set_rss_tuple,
9437         .get_rss_tuple = hclge_get_rss_tuple,
9438         .get_tc_size = hclge_get_tc_size,
9439         .get_mac_addr = hclge_get_mac_addr,
9440         .set_mac_addr = hclge_set_mac_addr,
9441         .do_ioctl = hclge_do_ioctl,
9442         .add_uc_addr = hclge_add_uc_addr,
9443         .rm_uc_addr = hclge_rm_uc_addr,
9444         .add_mc_addr = hclge_add_mc_addr,
9445         .rm_mc_addr = hclge_rm_mc_addr,
9446         .set_autoneg = hclge_set_autoneg,
9447         .get_autoneg = hclge_get_autoneg,
9448         .restart_autoneg = hclge_restart_autoneg,
9449         .halt_autoneg = hclge_halt_autoneg,
9450         .get_pauseparam = hclge_get_pauseparam,
9451         .set_pauseparam = hclge_set_pauseparam,
9452         .set_mtu = hclge_set_mtu,
9453         .reset_queue = hclge_reset_tqp,
9454         .get_stats = hclge_get_stats,
9455         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9456         .update_stats = hclge_update_stats,
9457         .get_strings = hclge_get_strings,
9458         .get_sset_count = hclge_get_sset_count,
9459         .get_fw_version = hclge_get_fw_version,
9460         .get_mdix_mode = hclge_get_mdix_mode,
9461         .enable_vlan_filter = hclge_enable_vlan_filter,
9462         .set_vlan_filter = hclge_set_vlan_filter,
9463         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9464         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9465         .reset_event = hclge_reset_event,
9466         .get_reset_level = hclge_get_reset_level,
9467         .set_default_reset_request = hclge_set_def_reset_request,
9468         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9469         .set_channels = hclge_set_channels,
9470         .get_channels = hclge_get_channels,
9471         .get_regs_len = hclge_get_regs_len,
9472         .get_regs = hclge_get_regs,
9473         .set_led_id = hclge_set_led_id,
9474         .get_link_mode = hclge_get_link_mode,
9475         .add_fd_entry = hclge_add_fd_entry,
9476         .del_fd_entry = hclge_del_fd_entry,
9477         .del_all_fd_entries = hclge_del_all_fd_entries,
9478         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9479         .get_fd_rule_info = hclge_get_fd_rule_info,
9480         .get_fd_all_rules = hclge_get_all_rules,
9481         .restore_fd_rules = hclge_restore_fd_entries,
9482         .enable_fd = hclge_enable_fd,
9483         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9484         .dbg_run_cmd = hclge_dbg_run_cmd,
9485         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9486         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9487         .ae_dev_resetting = hclge_ae_dev_resetting,
9488         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9489         .set_gro_en = hclge_gro_en,
9490         .get_global_queue_id = hclge_covert_handle_qid_global,
9491         .set_timer_task = hclge_set_timer_task,
9492         .mac_connect_phy = hclge_mac_connect_phy,
9493         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9494         .restore_vlan_table = hclge_restore_vlan_table,
9495 };
9496
9497 static struct hnae3_ae_algo ae_algo = {
9498         .ops = &hclge_ops,
9499         .pdev_id_table = ae_algo_pci_tbl,
9500 };
9501
9502 static int hclge_init(void)
9503 {
9504         pr_info("%s is initializing\n", HCLGE_NAME);
9505
9506         hnae3_register_ae_algo(&ae_algo);
9507
9508         return 0;
9509 }
9510
9511 static void hclge_exit(void)
9512 {
9513         hnae3_unregister_ae_algo(&ae_algo);
9514 }
9515 module_init(hclge_init);
9516 module_exit(hclge_exit);
9517
9518 MODULE_LICENSE("GPL");
9519 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9520 MODULE_DESCRIPTION("HCLGE Driver");
9521 MODULE_VERSION(HCLGE_MOD_VERSION);