drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38
39 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
40 static int hclge_init_vlan_config(struct hclge_dev *hdev);
41 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
42 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
43 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
44 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
45                                u16 *allocated_size, bool is_alloc);
46 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
47 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
48 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
49                                                    unsigned long *addr);
50
51 static struct hnae3_ae_algo ae_algo;
52
53 static const struct pci_device_id ae_algo_pci_tbl[] = {
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
55         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
56         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
57         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
58         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
59         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
60         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
61         /* required last entry */
62         {0, }
63 };
64
65 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
66
67 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
68                                          HCLGE_CMDQ_TX_ADDR_H_REG,
69                                          HCLGE_CMDQ_TX_DEPTH_REG,
70                                          HCLGE_CMDQ_TX_TAIL_REG,
71                                          HCLGE_CMDQ_TX_HEAD_REG,
72                                          HCLGE_CMDQ_RX_ADDR_L_REG,
73                                          HCLGE_CMDQ_RX_ADDR_H_REG,
74                                          HCLGE_CMDQ_RX_DEPTH_REG,
75                                          HCLGE_CMDQ_RX_TAIL_REG,
76                                          HCLGE_CMDQ_RX_HEAD_REG,
77                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
78                                          HCLGE_CMDQ_INTR_STS_REG,
79                                          HCLGE_CMDQ_INTR_EN_REG,
80                                          HCLGE_CMDQ_INTR_GEN_REG};
81
82 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
83                                            HCLGE_VECTOR0_OTER_EN_REG,
84                                            HCLGE_MISC_RESET_STS_REG,
85                                            HCLGE_MISC_VECTOR_INT_STS,
86                                            HCLGE_GLOBAL_RESET_REG,
87                                            HCLGE_FUN_RST_ING,
88                                            HCLGE_GRO_EN_REG};
89
90 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
91                                          HCLGE_RING_RX_ADDR_H_REG,
92                                          HCLGE_RING_RX_BD_NUM_REG,
93                                          HCLGE_RING_RX_BD_LENGTH_REG,
94                                          HCLGE_RING_RX_MERGE_EN_REG,
95                                          HCLGE_RING_RX_TAIL_REG,
96                                          HCLGE_RING_RX_HEAD_REG,
97                                          HCLGE_RING_RX_FBD_NUM_REG,
98                                          HCLGE_RING_RX_OFFSET_REG,
99                                          HCLGE_RING_RX_FBD_OFFSET_REG,
100                                          HCLGE_RING_RX_STASH_REG,
101                                          HCLGE_RING_RX_BD_ERR_REG,
102                                          HCLGE_RING_TX_ADDR_L_REG,
103                                          HCLGE_RING_TX_ADDR_H_REG,
104                                          HCLGE_RING_TX_BD_NUM_REG,
105                                          HCLGE_RING_TX_PRIORITY_REG,
106                                          HCLGE_RING_TX_TC_REG,
107                                          HCLGE_RING_TX_MERGE_EN_REG,
108                                          HCLGE_RING_TX_TAIL_REG,
109                                          HCLGE_RING_TX_HEAD_REG,
110                                          HCLGE_RING_TX_FBD_NUM_REG,
111                                          HCLGE_RING_TX_OFFSET_REG,
112                                          HCLGE_RING_TX_EBD_NUM_REG,
113                                          HCLGE_RING_TX_EBD_OFFSET_REG,
114                                          HCLGE_RING_TX_BD_ERR_REG,
115                                          HCLGE_RING_EN_REG};
116
117 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
118                                              HCLGE_TQP_INTR_GL0_REG,
119                                              HCLGE_TQP_INTR_GL1_REG,
120                                              HCLGE_TQP_INTR_GL2_REG,
121                                              HCLGE_TQP_INTR_RL_REG};
122
123 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
124         "App    Loopback test",
125         "Serdes serial Loopback test",
126         "Serdes parallel Loopback test",
127         "Phy    Loopback test"
128 };
129
130 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
131         {"mac_tx_mac_pause_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
133         {"mac_rx_mac_pause_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
135         {"mac_tx_control_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
137         {"mac_rx_control_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
139         {"mac_tx_pfc_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
141         {"mac_tx_pfc_pri0_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
143         {"mac_tx_pfc_pri1_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
145         {"mac_tx_pfc_pri2_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
147         {"mac_tx_pfc_pri3_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
149         {"mac_tx_pfc_pri4_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
151         {"mac_tx_pfc_pri5_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
153         {"mac_tx_pfc_pri6_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
155         {"mac_tx_pfc_pri7_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
157         {"mac_rx_pfc_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
159         {"mac_rx_pfc_pri0_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
161         {"mac_rx_pfc_pri1_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
163         {"mac_rx_pfc_pri2_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
165         {"mac_rx_pfc_pri3_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
167         {"mac_rx_pfc_pri4_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
169         {"mac_rx_pfc_pri5_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
171         {"mac_rx_pfc_pri6_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
173         {"mac_rx_pfc_pri7_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
175         {"mac_tx_total_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
177         {"mac_tx_total_oct_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
179         {"mac_tx_good_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
181         {"mac_tx_bad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
183         {"mac_tx_good_oct_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
185         {"mac_tx_bad_oct_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
187         {"mac_tx_uni_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
189         {"mac_tx_multi_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
191         {"mac_tx_broad_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
193         {"mac_tx_undersize_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
195         {"mac_tx_oversize_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
197         {"mac_tx_64_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
199         {"mac_tx_65_127_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
201         {"mac_tx_128_255_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
203         {"mac_tx_256_511_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
205         {"mac_tx_512_1023_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
207         {"mac_tx_1024_1518_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
209         {"mac_tx_1519_2047_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
211         {"mac_tx_2048_4095_oct_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
213         {"mac_tx_4096_8191_oct_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
215         {"mac_tx_8192_9216_oct_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
217         {"mac_tx_9217_12287_oct_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
219         {"mac_tx_12288_16383_oct_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
221         {"mac_tx_1519_max_good_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
223         {"mac_tx_1519_max_bad_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
225         {"mac_rx_total_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
227         {"mac_rx_total_oct_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
229         {"mac_rx_good_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
231         {"mac_rx_bad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
233         {"mac_rx_good_oct_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
235         {"mac_rx_bad_oct_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
237         {"mac_rx_uni_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
239         {"mac_rx_multi_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
241         {"mac_rx_broad_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
243         {"mac_rx_undersize_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
245         {"mac_rx_oversize_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
247         {"mac_rx_64_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
249         {"mac_rx_65_127_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
251         {"mac_rx_128_255_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
253         {"mac_rx_256_511_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
255         {"mac_rx_512_1023_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
257         {"mac_rx_1024_1518_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
259         {"mac_rx_1519_2047_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
261         {"mac_rx_2048_4095_oct_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
263         {"mac_rx_4096_8191_oct_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
265         {"mac_rx_8192_9216_oct_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
267         {"mac_rx_9217_12287_oct_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
269         {"mac_rx_12288_16383_oct_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
271         {"mac_rx_1519_max_good_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
273         {"mac_rx_1519_max_bad_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
275
276         {"mac_tx_fragment_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
278         {"mac_tx_undermin_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
280         {"mac_tx_jabber_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
282         {"mac_tx_err_all_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
284         {"mac_tx_from_app_good_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
286         {"mac_tx_from_app_bad_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
288         {"mac_rx_fragment_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
290         {"mac_rx_undermin_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
292         {"mac_rx_jabber_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
294         {"mac_rx_fcs_err_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
296         {"mac_rx_send_app_good_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
298         {"mac_rx_send_app_bad_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
300 };
301
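/* MAC manager table entry matching LLDP frames sent to the nearest-bridge
 * multicast address 01-80-C2-00-00-0E
 */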
302 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
303         {
304                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
305                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
306                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
307                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
308                 .i_port_bitmap = 0x1,
309         },
310 };
311
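/* default 40-byte RSS hash key (the standard Toeplitz key) */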
312 static const u8 hclge_hash_key[] = {
313         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
314         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
315         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
316         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
317         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
318 };
319
320 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
321 {
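/* number of command descriptors used to read the full MAC statistics set */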
322 #define HCLGE_MAC_CMD_NUM 21
323
324         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
325         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
326         __le64 *desc_data;
327         int i, k, n;
328         int ret;
329
330         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
331         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
332         if (ret) {
333                 dev_err(&hdev->pdev->dev,
334                         "Get MAC pkt stats fail, status = %d.\n", ret);
335
336                 return ret;
337         }
338
339         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
340                 /* for special opcode 0032, only the first desc has the header */
341                 if (unlikely(i == 0)) {
342                         desc_data = (__le64 *)(&desc[i].data[0]);
343                         n = HCLGE_RD_FIRST_STATS_NUM;
344                 } else {
345                         desc_data = (__le64 *)(&desc[i]);
346                         n = HCLGE_RD_OTHER_STATS_NUM;
347                 }
348
349                 for (k = 0; k < n; k++) {
350                         *data += le64_to_cpu(*desc_data);
351                         data++;
352                         desc_data++;
353                 }
354         }
355
356         return 0;
357 }
358
359 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
360 {
361         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
362         struct hclge_desc *desc;
363         __le64 *desc_data;
364         u16 i, k, n;
365         int ret;
366
367         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
368         if (!desc)
369                 return -ENOMEM;
370         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
371         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
372         if (ret) {
373                 kfree(desc);
374                 return ret;
375         }
376
377         for (i = 0; i < desc_num; i++) {
378                 /* for special opcode 0034, only the first desc has the header */
379                 if (i == 0) {
380                         desc_data = (__le64 *)(&desc[i].data[0]);
381                         n = HCLGE_RD_FIRST_STATS_NUM;
382                 } else {
383                         desc_data = (__le64 *)(&desc[i]);
384                         n = HCLGE_RD_OTHER_STATS_NUM;
385                 }
386
387                 for (k = 0; k < n; k++) {
388                         *data += le64_to_cpu(*desc_data);
389                         data++;
390                         desc_data++;
391                 }
392         }
393
394         kfree(desc);
395
396         return 0;
397 }
398
399 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
400 {
401         struct hclge_desc desc;
402         __le32 *desc_data;
403         u32 reg_num;
404         int ret;
405
406         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
407         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
408         if (ret)
409                 return ret;
410
411         desc_data = (__le32 *)(&desc.data[0]);
412         reg_num = le32_to_cpu(*desc_data);
413
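        /* the first descriptor carries 3 stats registers and each following
         * descriptor carries 4, hence the round-up below
         */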
414         *desc_num = 1 + ((reg_num - 3) >> 2) +
415                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
416
417         return 0;
418 }
419
420 static int hclge_mac_update_stats(struct hclge_dev *hdev)
421 {
422         u32 desc_num;
423         int ret;
424
425         ret = hclge_mac_query_reg_num(hdev, &desc_num);
426
427         /* on success, the firmware supports the new statistics acquisition method */
428         if (!ret)
429                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
430         else if (ret == -EOPNOTSUPP)
431                 ret = hclge_mac_update_stats_defective(hdev);
432         else
433                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
434
435         return ret;
436 }
437
438 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
439 {
440         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
441         struct hclge_vport *vport = hclge_get_vport(handle);
442         struct hclge_dev *hdev = vport->back;
443         struct hnae3_queue *queue;
444         struct hclge_desc desc[1];
445         struct hclge_tqp *tqp;
446         int ret, i;
447
448         for (i = 0; i < kinfo->num_tqps; i++) {
449                 queue = handle->kinfo.tqp[i];
450                 tqp = container_of(queue, struct hclge_tqp, q);
451                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
452                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
453                                            true);
454
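                /* only the low 9 bits of the queue index are used by the command */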
455                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
456                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
457                 if (ret) {
458                         dev_err(&hdev->pdev->dev,
459                                 "Query tqp stat fail, status = %d, queue = %d\n",
460                                 ret, i);
461                         return ret;
462                 }
463                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
464                         le32_to_cpu(desc[0].data[1]);
465         }
466
467         for (i = 0; i < kinfo->num_tqps; i++) {
468                 queue = handle->kinfo.tqp[i];
469                 tqp = container_of(queue, struct hclge_tqp, q);
470                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
471                 hclge_cmd_setup_basic_desc(&desc[0],
472                                            HCLGE_OPC_QUERY_TX_STATUS,
473                                            true);
474
475                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
476                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
477                 if (ret) {
478                         dev_err(&hdev->pdev->dev,
479                                 "Query tqp stat fail, status = %d, queue = %d\n",
480                                 ret, i);
481                         return ret;
482                 }
483                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
484                         le32_to_cpu(desc[0].data[1]);
485         }
486
487         return 0;
488 }
489
490 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
491 {
492         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
493         struct hclge_tqp *tqp;
494         u64 *buff = data;
495         int i;
496
497         for (i = 0; i < kinfo->num_tqps; i++) {
498                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
499                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
500         }
501
502         for (i = 0; i < kinfo->num_tqps; i++) {
503                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
504                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
505         }
506
507         return buff;
508 }
509
510 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
511 {
512         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
513
514         /* each tqp has one TX queue and one RX queue */
515         return kinfo->num_tqps * (2);
516 }
517
518 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
519 {
520         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
521         u8 *buff = data;
522         int i = 0;
523
524         for (i = 0; i < kinfo->num_tqps; i++) {
525                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
526                         struct hclge_tqp, q);
527                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
528                          tqp->index);
529                 buff = buff + ETH_GSTRING_LEN;
530         }
531
532         for (i = 0; i < kinfo->num_tqps; i++) {
533                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
534                         struct hclge_tqp, q);
535                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
536                          tqp->index);
537                 buff = buff + ETH_GSTRING_LEN;
538         }
539
540         return buff;
541 }
542
543 static u64 *hclge_comm_get_stats(const void *comm_stats,
544                                  const struct hclge_comm_stats_str strs[],
545                                  int size, u64 *data)
546 {
547         u64 *buf = data;
548         u32 i;
549
550         for (i = 0; i < size; i++)
551                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
552
553         return buf + size;
554 }
555
556 static u8 *hclge_comm_get_strings(u32 stringset,
557                                   const struct hclge_comm_stats_str strs[],
558                                   int size, u8 *data)
559 {
560         char *buff = (char *)data;
561         u32 i;
562
563         if (stringset != ETH_SS_STATS)
564                 return buff;
565
566         for (i = 0; i < size; i++) {
567                 snprintf(buff, ETH_GSTRING_LEN, "%s",
568                          strs[i].desc);
569                 buff = buff + ETH_GSTRING_LEN;
570         }
571
572         return (u8 *)buff;
573 }
574
575 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
576 {
577         struct hnae3_handle *handle;
578         int status;
579
580         handle = &hdev->vport[0].nic;
581         if (handle->client) {
582                 status = hclge_tqps_update_stats(handle);
583                 if (status) {
584                         dev_err(&hdev->pdev->dev,
585                                 "Update TQPS stats fail, status = %d.\n",
586                                 status);
587                 }
588         }
589
590         status = hclge_mac_update_stats(hdev);
591         if (status)
592                 dev_err(&hdev->pdev->dev,
593                         "Update MAC stats fail, status = %d.\n", status);
594 }
595
596 static void hclge_update_stats(struct hnae3_handle *handle,
597                                struct net_device_stats *net_stats)
598 {
599         struct hclge_vport *vport = hclge_get_vport(handle);
600         struct hclge_dev *hdev = vport->back;
601         int status;
602
603         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
604                 return;
605
606         status = hclge_mac_update_stats(hdev);
607         if (status)
608                 dev_err(&hdev->pdev->dev,
609                         "Update MAC stats fail, status = %d.\n",
610                         status);
611
612         status = hclge_tqps_update_stats(handle);
613         if (status)
614                 dev_err(&hdev->pdev->dev,
615                         "Update TQPS stats fail, status = %d.\n",
616                         status);
617
618         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
619 }
620
621 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
622 {
623 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
624                 HNAE3_SUPPORT_PHY_LOOPBACK |\
625                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
626                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
627
628         struct hclge_vport *vport = hclge_get_vport(handle);
629         struct hclge_dev *hdev = vport->back;
630         int count = 0;
631
632         /* Loopback test support rules:
633          * mac: only supported in GE mode
634          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
635          * phy: only supported when a phy device is present on the board
636          */
637         if (stringset == ETH_SS_TEST) {
638                 /* clear the loopback bit flags first */
639                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
640                 if (hdev->pdev->revision >= 0x21 ||
641                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
642                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
643                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
644                         count += 1;
645                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
646                 }
647
648                 count += 2;
649                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
650                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
651         } else if (stringset == ETH_SS_STATS) {
652                 count = ARRAY_SIZE(g_mac_stats_string) +
653                         hclge_tqps_get_sset_count(handle, stringset);
654         }
655
656         return count;
657 }
658
659 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
660                               u8 *data)
661 {
662         u8 *p = (char *)data;
663         int size;
664
665         if (stringset == ETH_SS_STATS) {
666                 size = ARRAY_SIZE(g_mac_stats_string);
667                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
668                                            size, p);
669                 p = hclge_tqps_get_strings(handle, p);
670         } else if (stringset == ETH_SS_TEST) {
671                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
672                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
677                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
678                                ETH_GSTRING_LEN);
679                         p += ETH_GSTRING_LEN;
680                 }
681                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
682                         memcpy(p,
683                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
684                                ETH_GSTRING_LEN);
685                         p += ETH_GSTRING_LEN;
686                 }
687                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
688                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
689                                ETH_GSTRING_LEN);
690                         p += ETH_GSTRING_LEN;
691                 }
692         }
693 }
694
695 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
696 {
697         struct hclge_vport *vport = hclge_get_vport(handle);
698         struct hclge_dev *hdev = vport->back;
699         u64 *p;
700
701         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
702                                  ARRAY_SIZE(g_mac_stats_string), data);
703         p = hclge_tqps_get_stats(handle, p);
704 }
705
706 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
707                                      u64 *rx_cnt)
708 {
709         struct hclge_vport *vport = hclge_get_vport(handle);
710         struct hclge_dev *hdev = vport->back;
711
712         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
713         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
714 }
715
716 static int hclge_parse_func_status(struct hclge_dev *hdev,
717                                    struct hclge_func_status_cmd *status)
718 {
719         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
720                 return -EINVAL;
721
722         /* record whether this pf is the main pf */
723         if (status->pf_state & HCLGE_PF_STATE_MAIN)
724                 hdev->flag |= HCLGE_FLAG_MAIN;
725         else
726                 hdev->flag &= ~HCLGE_FLAG_MAIN;
727
728         return 0;
729 }
730
731 static int hclge_query_function_status(struct hclge_dev *hdev)
732 {
733 #define HCLGE_QUERY_MAX_CNT     5
734
735         struct hclge_func_status_cmd *req;
736         struct hclge_desc desc;
737         int timeout = 0;
738         int ret;
739
740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
741         req = (struct hclge_func_status_cmd *)desc.data;
742
743         do {
744                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
745                 if (ret) {
746                         dev_err(&hdev->pdev->dev,
747                                 "query function status failed %d.\n", ret);
748                         return ret;
749                 }
750
751                 /* check whether pf reset is done */
752                 if (req->pf_state)
753                         break;
754                 usleep_range(1000, 2000);
755         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
756
757         ret = hclge_parse_func_status(hdev, req);
758
759         return ret;
760 }
761
762 static int hclge_query_pf_resource(struct hclge_dev *hdev)
763 {
764         struct hclge_pf_res_cmd *req;
765         struct hclge_desc desc;
766         int ret;
767
768         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
769         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
770         if (ret) {
771                 dev_err(&hdev->pdev->dev,
772                         "query pf resource failed %d.\n", ret);
773                 return ret;
774         }
775
776         req = (struct hclge_pf_res_cmd *)desc.data;
777         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
778         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
779
780         if (req->tx_buf_size)
781                 hdev->tx_buf_size =
782                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
783         else
784                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
785
786         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
787
788         if (req->dv_buf_size)
789                 hdev->dv_buf_size =
790                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
791         else
792                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
793
794         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
795
796         if (hnae3_dev_roce_supported(hdev)) {
797                 hdev->roce_base_msix_offset =
798                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
799                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
800                 hdev->num_roce_msi =
801                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
802                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
803
804                 /* The PF owns both NIC vectors and RoCE vectors;
805                  * NIC vectors come before RoCE vectors in the vector table.
806                  */
807                 hdev->num_msi = hdev->num_roce_msi +
808                                 hdev->roce_base_msix_offset;
809         } else {
810                 hdev->num_msi =
811                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
812                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
813         }
814
815         return 0;
816 }
817
818 static int hclge_parse_speed(int speed_cmd, int *speed)
819 {
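        /* speed_cmd is the firmware speed code taken from the default_speed config field */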
820         switch (speed_cmd) {
821         case 6:
822                 *speed = HCLGE_MAC_SPEED_10M;
823                 break;
824         case 7:
825                 *speed = HCLGE_MAC_SPEED_100M;
826                 break;
827         case 0:
828                 *speed = HCLGE_MAC_SPEED_1G;
829                 break;
830         case 1:
831                 *speed = HCLGE_MAC_SPEED_10G;
832                 break;
833         case 2:
834                 *speed = HCLGE_MAC_SPEED_25G;
835                 break;
836         case 3:
837                 *speed = HCLGE_MAC_SPEED_40G;
838                 break;
839         case 4:
840                 *speed = HCLGE_MAC_SPEED_50G;
841                 break;
842         case 5:
843                 *speed = HCLGE_MAC_SPEED_100G;
844                 break;
845         default:
846                 return -EINVAL;
847         }
848
849         return 0;
850 }
851
852 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
853 {
854         struct hclge_vport *vport = hclge_get_vport(handle);
855         struct hclge_dev *hdev = vport->back;
856         u32 speed_ability = hdev->hw.mac.speed_ability;
857         u32 speed_bit = 0;
858
859         switch (speed) {
860         case HCLGE_MAC_SPEED_10M:
861                 speed_bit = HCLGE_SUPPORT_10M_BIT;
862                 break;
863         case HCLGE_MAC_SPEED_100M:
864                 speed_bit = HCLGE_SUPPORT_100M_BIT;
865                 break;
866         case HCLGE_MAC_SPEED_1G:
867                 speed_bit = HCLGE_SUPPORT_1G_BIT;
868                 break;
869         case HCLGE_MAC_SPEED_10G:
870                 speed_bit = HCLGE_SUPPORT_10G_BIT;
871                 break;
872         case HCLGE_MAC_SPEED_25G:
873                 speed_bit = HCLGE_SUPPORT_25G_BIT;
874                 break;
875         case HCLGE_MAC_SPEED_40G:
876                 speed_bit = HCLGE_SUPPORT_40G_BIT;
877                 break;
878         case HCLGE_MAC_SPEED_50G:
879                 speed_bit = HCLGE_SUPPORT_50G_BIT;
880                 break;
881         case HCLGE_MAC_SPEED_100G:
882                 speed_bit = HCLGE_SUPPORT_100G_BIT;
883                 break;
884         default:
885                 return -EINVAL;
886         }
887
888         if (speed_bit & speed_ability)
889                 return 0;
890
891         return -EINVAL;
892 }
893
894 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
895 {
896         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
897                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
898                                  mac->supported);
899         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
900                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
901                                  mac->supported);
902         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
903                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
904                                  mac->supported);
905         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
906                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
907                                  mac->supported);
908         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
909                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
910                                  mac->supported);
911 }
912
913 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
914 {
915         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
916                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
917                                  mac->supported);
918         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
919                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
920                                  mac->supported);
921         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
922                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
923                                  mac->supported);
924         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
925                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
926                                  mac->supported);
927         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
928                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
929                                  mac->supported);
930 }
931
932 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
933 {
934         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
935                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
936                                  mac->supported);
937         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
938                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
939                                  mac->supported);
940         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
941                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
942                                  mac->supported);
943         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
944                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
945                                  mac->supported);
946         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
947                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
948                                  mac->supported);
949 }
950
951 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
952 {
953         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
954                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
955                                  mac->supported);
956         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
957                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
958                                  mac->supported);
959         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
960                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
961                                  mac->supported);
962         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
963                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
964                                  mac->supported);
965         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
966                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
967                                  mac->supported);
968         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
969                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
970                                  mac->supported);
971 }
972
973 static void hclge_convert_setting_fec(struct hclge_mac *mac)
974 {
975         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
976         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
977
978         switch (mac->speed) {
979         case HCLGE_MAC_SPEED_10G:
980         case HCLGE_MAC_SPEED_40G:
981                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
982                                  mac->supported);
983                 mac->fec_ability =
984                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
985                 break;
986         case HCLGE_MAC_SPEED_25G:
987         case HCLGE_MAC_SPEED_50G:
988                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
989                                  mac->supported);
990                 mac->fec_ability =
991                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
992                         BIT(HNAE3_FEC_AUTO);
993                 break;
994         case HCLGE_MAC_SPEED_100G:
995                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
996                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
997                 break;
998         default:
999                 mac->fec_ability = 0;
1000                 break;
1001         }
1002 }
1003
1004 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1005                                         u8 speed_ability)
1006 {
1007         struct hclge_mac *mac = &hdev->hw.mac;
1008
1009         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1010                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1011                                  mac->supported);
1012
1013         hclge_convert_setting_sr(mac, speed_ability);
1014         hclge_convert_setting_lr(mac, speed_ability);
1015         hclge_convert_setting_cr(mac, speed_ability);
1016         if (hdev->pdev->revision >= 0x21)
1017                 hclge_convert_setting_fec(mac);
1018
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1020         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1021         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1022 }
1023
1024 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1025                                             u8 speed_ability)
1026 {
1027         struct hclge_mac *mac = &hdev->hw.mac;
1028
1029         hclge_convert_setting_kr(mac, speed_ability);
1030         if (hdev->pdev->revision >= 0x21)
1031                 hclge_convert_setting_fec(mac);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1033         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1034         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1035 }
1036
1037 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1038                                          u8 speed_ability)
1039 {
1040         unsigned long *supported = hdev->hw.mac.supported;
1041
1042         /* default to supporting all speeds for a GE port */
1043         if (!speed_ability)
1044                 speed_ability = HCLGE_SUPPORT_GE;
1045
1046         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1047                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1048                                  supported);
1049
1050         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1052                                  supported);
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1054                                  supported);
1055         }
1056
1057         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1059                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1060         }
1061
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1063         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1064         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1065 }
1066
1067 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1068 {
1069         u8 media_type = hdev->hw.mac.media_type;
1070
1071         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1072                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1074                 hclge_parse_copper_link_mode(hdev, speed_ability);
1075         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1076                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1077 }

1078 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1079 {
1080         struct hclge_cfg_param_cmd *req;
1081         u64 mac_addr_tmp_high;
1082         u64 mac_addr_tmp;
1083         unsigned int i;
1084
1085         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1086
1087         /* get the configuration */
1088         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1089                                               HCLGE_CFG_VMDQ_M,
1090                                               HCLGE_CFG_VMDQ_S);
1091         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1093         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1094                                             HCLGE_CFG_TQP_DESC_N_M,
1095                                             HCLGE_CFG_TQP_DESC_N_S);
1096
1097         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1098                                         HCLGE_CFG_PHY_ADDR_M,
1099                                         HCLGE_CFG_PHY_ADDR_S);
1100         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1101                                           HCLGE_CFG_MEDIA_TP_M,
1102                                           HCLGE_CFG_MEDIA_TP_S);
1103         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1104                                           HCLGE_CFG_RX_BUF_LEN_M,
1105                                           HCLGE_CFG_RX_BUF_LEN_S);
1106         /* get mac_address */
1107         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1108         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1109                                             HCLGE_CFG_MAC_ADDR_H_M,
1110                                             HCLGE_CFG_MAC_ADDR_H_S);
1111
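        /* place the high 16 bits at bit 32; the two shifts are equivalent to << 32 */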
1112         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1113
1114         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1115                                              HCLGE_CFG_DEFAULT_SPEED_M,
1116                                              HCLGE_CFG_DEFAULT_SPEED_S);
1117         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1118                                             HCLGE_CFG_RSS_SIZE_M,
1119                                             HCLGE_CFG_RSS_SIZE_S);
1120
1121         for (i = 0; i < ETH_ALEN; i++)
1122                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1123
1124         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1125         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1126
1127         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1128                                              HCLGE_CFG_SPEED_ABILITY_M,
1129                                              HCLGE_CFG_SPEED_ABILITY_S);
1130         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1131                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1132                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1133         if (!cfg->umv_space)
1134                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1135 }
1136
1137 /* hclge_get_cfg: query static parameters from flash
1138  * @hdev: pointer to struct hclge_dev
1139  * @hcfg: the config structure to be filled
1140  */
1141 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1142 {
1143         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1144         struct hclge_cfg_param_cmd *req;
1145         unsigned int i;
1146         int ret;
1147
1148         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1149                 u32 offset = 0;
1150
1151                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1152                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1153                                            true);
1154                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1155                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1156                 /* the length must be given in units of 4 bytes when sent to hardware */
1157                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1158                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1159                 req->offset = cpu_to_le32(offset);
1160         }
1161
1162         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1163         if (ret) {
1164                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1165                 return ret;
1166         }
1167
1168         hclge_parse_cfg(hcfg, desc);
1169
1170         return 0;
1171 }
1172
1173 static int hclge_get_cap(struct hclge_dev *hdev)
1174 {
1175         int ret;
1176
1177         ret = hclge_query_function_status(hdev);
1178         if (ret) {
1179                 dev_err(&hdev->pdev->dev,
1180                         "query function status error %d.\n", ret);
1181                 return ret;
1182         }
1183
1184         /* get pf resource */
1185         ret = hclge_query_pf_resource(hdev);
1186         if (ret)
1187                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1188
1189         return ret;
1190 }
1191
1192 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1193 {
1194 #define HCLGE_MIN_TX_DESC       64
1195 #define HCLGE_MIN_RX_DESC       64
1196
1197         if (!is_kdump_kernel())
1198                 return;
1199
1200         dev_info(&hdev->pdev->dev,
1201                  "Running kdump kernel. Using minimal resources\n");
1202
1203         /* the minimal number of queue pairs equals the number of vports */
1204         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1205         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1206         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1207 }
1208
1209 static int hclge_configure(struct hclge_dev *hdev)
1210 {
1211         struct hclge_cfg cfg;
1212         unsigned int i;
1213         int ret;
1214
1215         ret = hclge_get_cfg(hdev, &cfg);
1216         if (ret) {
1217                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1218                 return ret;
1219         }
1220
1221         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1222         hdev->base_tqp_pid = 0;
1223         hdev->rss_size_max = cfg.rss_size_max;
1224         hdev->rx_buf_len = cfg.rx_buf_len;
1225         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1226         hdev->hw.mac.media_type = cfg.media_type;
1227         hdev->hw.mac.phy_addr = cfg.phy_addr;
1228         hdev->num_tx_desc = cfg.tqp_desc_num;
1229         hdev->num_rx_desc = cfg.tqp_desc_num;
1230         hdev->tm_info.num_pg = 1;
1231         hdev->tc_max = cfg.tc_num;
1232         hdev->tm_info.hw_pfc_map = 0;
1233         hdev->wanted_umv_size = cfg.umv_space;
1234
1235         if (hnae3_dev_fd_supported(hdev)) {
1236                 hdev->fd_en = true;
1237                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1238         }
1239
1240         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1241         if (ret) {
1242                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1243                 return ret;
1244         }
1245
1246         hclge_parse_link_mode(hdev, cfg.speed_ability);
1247
1248         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1249             (hdev->tc_max < 1)) {
1250                 dev_warn(&hdev->pdev->dev, "invalid TC num = %d, resetting to 1.\n",
1251                          hdev->tc_max);
1252                 hdev->tc_max = 1;
1253         }
1254
1255         /* Dev does not support DCB */
1256         if (!hnae3_dev_dcb_supported(hdev)) {
1257                 hdev->tc_max = 1;
1258                 hdev->pfc_max = 0;
1259         } else {
1260                 hdev->pfc_max = hdev->tc_max;
1261         }
1262
1263         hdev->tm_info.num_tc = 1;
1264
1265         /* non-contiguous TCs are currently not supported */
1266         for (i = 0; i < hdev->tm_info.num_tc; i++)
1267                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1268
1269         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1270
1271         hclge_init_kdump_kernel_config(hdev);
1272
1273         return ret;
1274 }
1275
1276 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1277                             unsigned int tso_mss_max)
1278 {
1279         struct hclge_cfg_tso_status_cmd *req;
1280         struct hclge_desc desc;
1281         u16 tso_mss;
1282
1283         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1284
1285         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1286
1287         tso_mss = 0;
1288         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1289                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1290         req->tso_mss_min = cpu_to_le16(tso_mss);
1291
1292         tso_mss = 0;
1293         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1294                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1295         req->tso_mss_max = cpu_to_le16(tso_mss);
1296
1297         return hclge_cmd_send(&hdev->hw, &desc, 1);
1298 }
1299
1300 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1301 {
1302         struct hclge_cfg_gro_status_cmd *req;
1303         struct hclge_desc desc;
1304         int ret;
1305
1306         if (!hnae3_dev_gro_supported(hdev))
1307                 return 0;
1308
1309         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1310         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1311
1312         req->gro_en = cpu_to_le16(en ? 1 : 0);
1313
1314         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1315         if (ret)
1316                 dev_err(&hdev->pdev->dev,
1317                         "GRO hardware config cmd failed, ret = %d\n", ret);
1318
1319         return ret;
1320 }
1321
1322 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1323 {
1324         struct hclge_tqp *tqp;
1325         int i;
1326
1327         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1328                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1329         if (!hdev->htqp)
1330                 return -ENOMEM;
1331
1332         tqp = hdev->htqp;
1333
1334         for (i = 0; i < hdev->num_tqps; i++) {
1335                 tqp->dev = &hdev->pdev->dev;
1336                 tqp->index = i;
1337
1338                 tqp->q.ae_algo = &ae_algo;
1339                 tqp->q.buf_size = hdev->rx_buf_len;
1340                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1341                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1342                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1343                         i * HCLGE_TQP_REG_SIZE;
1344
1345                 tqp++;
1346         }
1347
1348         return 0;
1349 }
1350
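/* Bind the physical queue pair @tqp_pid to the function identified by
 * @func_id (PF or VF) and expose it to that function as virtual queue
 * @tqp_vid.
 */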
1351 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1352                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1353 {
1354         struct hclge_tqp_map_cmd *req;
1355         struct hclge_desc desc;
1356         int ret;
1357
1358         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1359
1360         req = (struct hclge_tqp_map_cmd *)desc.data;
1361         req->tqp_id = cpu_to_le16(tqp_pid);
1362         req->tqp_vf = func_id;
1363         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1364                         1 << HCLGE_TQP_MAP_EN_B;
1365         req->tqp_vid = cpu_to_le16(tqp_vid);
1366
1367         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1368         if (ret)
1369                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1370
1371         return ret;
1372 }
1373
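/* Reserve @num_tqps unused queue pairs from the device pool for @vport and
 * derive the vport's rss_size from its per-TC queue share.
 */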
1374 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1375 {
1376         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1377         struct hclge_dev *hdev = vport->back;
1378         int i, alloced;
1379
1380         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1381              alloced < num_tqps; i++) {
1382                 if (!hdev->htqp[i].alloced) {
1383                         hdev->htqp[i].q.handle = &vport->nic;
1384                         hdev->htqp[i].q.tqp_index = alloced;
1385                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1386                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1387                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1388                         hdev->htqp[i].alloced = true;
1389                         alloced++;
1390                 }
1391         }
1392         vport->alloc_tqps = alloced;
1393         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1394                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1395
1396         return 0;
1397 }
1398
1399 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1400                             u16 num_tx_desc, u16 num_rx_desc)
1402 {
1403         struct hnae3_handle *nic = &vport->nic;
1404         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1405         struct hclge_dev *hdev = vport->back;
1406         int ret;
1407
1408         kinfo->num_tx_desc = num_tx_desc;
1409         kinfo->num_rx_desc = num_rx_desc;
1410
1411         kinfo->rx_buf_len = hdev->rx_buf_len;
1412
1413         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1414                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1415         if (!kinfo->tqp)
1416                 return -ENOMEM;
1417
1418         ret = hclge_assign_tqp(vport, num_tqps);
1419         if (ret)
1420                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1421
1422         return ret;
1423 }
1424
1425 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1426                                   struct hclge_vport *vport)
1427 {
1428         struct hnae3_handle *nic = &vport->nic;
1429         struct hnae3_knic_private_info *kinfo;
1430         u16 i;
1431
1432         kinfo = &nic->kinfo;
1433         for (i = 0; i < vport->alloc_tqps; i++) {
1434                 struct hclge_tqp *q =
1435                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1436                 bool is_pf;
1437                 int ret;
1438
1439                 is_pf = !(vport->vport_id);
1440                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1441                                              i, is_pf);
1442                 if (ret)
1443                         return ret;
1444         }
1445
1446         return 0;
1447 }
1448
1449 static int hclge_map_tqp(struct hclge_dev *hdev)
1450 {
1451         struct hclge_vport *vport = hdev->vport;
1452         u16 i, num_vport;
1453
1454         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1455         for (i = 0; i < num_vport; i++) {
1456                 int ret;
1457
1458                 ret = hclge_map_tqp_to_vport(hdev, vport);
1459                 if (ret)
1460                         return ret;
1461
1462                 vport++;
1463         }
1464
1465         return 0;
1466 }
1467
1468 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1469 {
1470         struct hnae3_handle *nic = &vport->nic;
1471         struct hclge_dev *hdev = vport->back;
1472         int ret;
1473
1474         nic->pdev = hdev->pdev;
1475         nic->ae_algo = &ae_algo;
1476         nic->numa_node_mask = hdev->numa_node_mask;
1477
1478         ret = hclge_knic_setup(vport, num_tqps,
1479                                hdev->num_tx_desc, hdev->num_rx_desc);
1480         if (ret)
1481                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1482
1483         return ret;
1484 }
1485
1486 static int hclge_alloc_vport(struct hclge_dev *hdev)
1487 {
1488         struct pci_dev *pdev = hdev->pdev;
1489         struct hclge_vport *vport;
1490         u32 tqp_main_vport;
1491         u32 tqp_per_vport;
1492         int num_vport, i;
1493         int ret;
1494
1495         /* We need to alloc a vport for the main NIC of the PF */
1496         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1497
1498         if (hdev->num_tqps < num_vport) {
1499                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1500                         hdev->num_tqps, num_vport);
1501                 return -EINVAL;
1502         }
1503
1504         /* Alloc the same number of TQPs for every vport */
1505         tqp_per_vport = hdev->num_tqps / num_vport;
1506         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1507
1508         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1509                              GFP_KERNEL);
1510         if (!vport)
1511                 return -ENOMEM;
1512
1513         hdev->vport = vport;
1514         hdev->num_alloc_vport = num_vport;
1515
1516         if (IS_ENABLED(CONFIG_PCI_IOV))
1517                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1518
1519         for (i = 0; i < num_vport; i++) {
1520                 vport->back = hdev;
1521                 vport->vport_id = i;
1522                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1523                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1524                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1525                 INIT_LIST_HEAD(&vport->vlan_list);
1526                 INIT_LIST_HEAD(&vport->uc_mac_list);
1527                 INIT_LIST_HEAD(&vport->mc_mac_list);
1528
1529                 if (i == 0)
1530                         ret = hclge_vport_setup(vport, tqp_main_vport);
1531                 else
1532                         ret = hclge_vport_setup(vport, tqp_per_vport);
1533                 if (ret) {
1534                         dev_err(&pdev->dev,
1535                                 "vport setup failed for vport %d, %d\n",
1536                                 i, ret);
1537                         return ret;
1538                 }
1539
1540                 vport++;
1541         }
1542
1543         return 0;
1544 }
1545
1546 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1547                                     struct hclge_pkt_buf_alloc *buf_alloc)
1548 {
1549 /* TX buffer size is in units of 128 bytes */
1550 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1551 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1552         struct hclge_tx_buff_alloc_cmd *req;
1553         struct hclge_desc desc;
1554         int ret;
1555         u8 i;
1556
1557         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1558
1559         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1560         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1561                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1562
1563                 req->tx_pkt_buff[i] =
1564                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1565                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1566         }
1567
1568         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1569         if (ret)
1570                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1571                         ret);
1572
1573         return ret;
1574 }
1575
1576 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1577                                  struct hclge_pkt_buf_alloc *buf_alloc)
1578 {
1579         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1580
1581         if (ret)
1582                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1583
1584         return ret;
1585 }
1586
1587 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1588 {
1589         unsigned int i;
1590         u32 cnt = 0;
1591
1592         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1593                 if (hdev->hw_tc_map & BIT(i))
1594                         cnt++;
1595         return cnt;
1596 }
1597
1598 /* Get the number of PFC-enabled TCs that have a private buffer */
1599 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1600                                   struct hclge_pkt_buf_alloc *buf_alloc)
1601 {
1602         struct hclge_priv_buf *priv;
1603         unsigned int i;
1604         int cnt = 0;
1605
1606         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1607                 priv = &buf_alloc->priv_buf[i];
1608                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1609                     priv->enable)
1610                         cnt++;
1611         }
1612
1613         return cnt;
1614 }
1615
1616 /* Get the number of PFC-disabled TCs that have a private buffer */
1617 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1618                                      struct hclge_pkt_buf_alloc *buf_alloc)
1619 {
1620         struct hclge_priv_buf *priv;
1621         unsigned int i;
1622         int cnt = 0;
1623
1624         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1625                 priv = &buf_alloc->priv_buf[i];
1626                 if (hdev->hw_tc_map & BIT(i) &&
1627                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1628                     priv->enable)
1629                         cnt++;
1630         }
1631
1632         return cnt;
1633 }
1634
1635 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1636 {
1637         struct hclge_priv_buf *priv;
1638         u32 rx_priv = 0;
1639         int i;
1640
1641         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1642                 priv = &buf_alloc->priv_buf[i];
1643                 if (priv->enable)
1644                         rx_priv += priv->buf_size;
1645         }
1646         return rx_priv;
1647 }
1648
1649 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1650 {
1651         u32 i, total_tx_size = 0;
1652
1653         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1654                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1655
1656         return total_tx_size;
1657 }
1658
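/* Check whether the rx private buffers of all TCs plus the required shared
 * buffer fit into @rx_all; if they do, also set up the shared buffer size,
 * its waterlines and the per-TC thresholds.
 */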
1659 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1660                                 struct hclge_pkt_buf_alloc *buf_alloc,
1661                                 u32 rx_all)
1662 {
1663         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1664         u32 tc_num = hclge_get_tc_num(hdev);
1665         u32 shared_buf, aligned_mps;
1666         u32 rx_priv;
1667         int i;
1668
1669         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1670
1671         if (hnae3_dev_dcb_supported(hdev))
1672                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1673                                         hdev->dv_buf_size;
1674         else
1675                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1676                                         + hdev->dv_buf_size;
1677
1678         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1679         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1680                              HCLGE_BUF_SIZE_UNIT);
1681
1682         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1683         if (rx_all < rx_priv + shared_std)
1684                 return false;
1685
1686         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1687         buf_alloc->s_buf.buf_size = shared_buf;
1688         if (hnae3_dev_dcb_supported(hdev)) {
1689                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1690                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1691                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1692                                   HCLGE_BUF_SIZE_UNIT);
1693         } else {
1694                 buf_alloc->s_buf.self.high = aligned_mps +
1695                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1696                 buf_alloc->s_buf.self.low = aligned_mps;
1697         }
1698
1699         if (hnae3_dev_dcb_supported(hdev)) {
1700                 hi_thrd = shared_buf - hdev->dv_buf_size;
1701
1702                 if (tc_num <= NEED_RESERVE_TC_NUM)
1703                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1704                                         / BUF_MAX_PERCENT;
1705
1706                 if (tc_num)
1707                         hi_thrd = hi_thrd / tc_num;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
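/* Assign a private rx buffer and waterlines to every enabled TC, using
 * larger (@max is true) or minimal waterlines, then check whether the
 * remaining space is still enough for the shared buffer.
 */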
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         unsigned int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1778                                         aligned_mps;
1779                 }
1780
1781                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1782         }
1783
1784         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1785 }
1786
1787 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1788                                           struct hclge_pkt_buf_alloc *buf_alloc)
1789 {
1790         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1791         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1792         int i;
1793
1794         /* clear TCs starting from the last one */
1795         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1796                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1797                 unsigned int mask = BIT((unsigned int)i);
1798
1799                 if (hdev->hw_tc_map & mask &&
1800                     !(hdev->tm_info.hw_pfc_map & mask)) {
1801                         /* Clear the no pfc TC private buffer */
1802                         priv->wl.low = 0;
1803                         priv->wl.high = 0;
1804                         priv->buf_size = 0;
1805                         priv->enable = 0;
1806                         no_pfc_priv_num--;
1807                 }
1808
1809                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1810                     no_pfc_priv_num == 0)
1811                         break;
1812         }
1813
1814         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1815 }
1816
1817 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1818                                         struct hclge_pkt_buf_alloc *buf_alloc)
1819 {
1820         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1821         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1822         int i;
1823
1824         /* clear TCs starting from the last one */
1825         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1826                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1827                 unsigned int mask = BIT((unsigned int)i);
1828
1829                 if (hdev->hw_tc_map & mask &&
1830                     hdev->tm_info.hw_pfc_map & mask) {
1831                         /* Reduce the number of pfc TC with private buffer */
1832                         priv->wl.low = 0;
1833                         priv->enable = 0;
1834                         priv->wl.high = 0;
1835                         priv->buf_size = 0;
1836                         pfc_priv_num--;
1837                 }
1838
1839                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1840                     pfc_priv_num == 0)
1841                         break;
1842         }
1843
1844         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1845 }
1846
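/* Try to split the whole rx packet buffer evenly among the enabled TCs as
 * private buffers, leaving no shared buffer; fails when the per-TC share
 * would drop below the required minimum.
 */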
1847 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1848                                        struct hclge_pkt_buf_alloc *buf_alloc)
1849 {
1850 #define COMPENSATE_BUFFER       0x3C00
1851 #define COMPENSATE_HALF_MPS_NUM 5
1852 #define PRIV_WL_GAP             0x1800
1853
1854         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1855         u32 tc_num = hclge_get_tc_num(hdev);
1856         u32 half_mps = hdev->mps >> 1;
1857         u32 min_rx_priv;
1858         unsigned int i;
1859
1860         if (tc_num)
1861                 rx_priv = rx_priv / tc_num;
1862
1863         if (tc_num <= NEED_RESERVE_TC_NUM)
1864                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1865
1866         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1867                         COMPENSATE_HALF_MPS_NUM * half_mps;
1868         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1869         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1870
1871         if (rx_priv < min_rx_priv)
1872                 return false;
1873
1874         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1875                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1876
1877                 priv->enable = 0;
1878                 priv->wl.low = 0;
1879                 priv->wl.high = 0;
1880                 priv->buf_size = 0;
1881
1882                 if (!(hdev->hw_tc_map & BIT(i)))
1883                         continue;
1884
1885                 priv->enable = 1;
1886                 priv->buf_size = rx_priv;
1887                 priv->wl.high = rx_priv - hdev->dv_buf_size;
1888                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1889         }
1890
1891         buf_alloc->s_buf.buf_size = 0;
1892
1893         return true;
1894 }
1895
1896 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1897  * @hdev: pointer to struct hclge_dev
1898  * @buf_alloc: pointer to buffer calculation data
1899  * @return: 0: calculation successful, negative: fail
1900  */
1901 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1902                                 struct hclge_pkt_buf_alloc *buf_alloc)
1903 {
1904         /* When DCB is not supported, rx private buffer is not allocated. */
1905         if (!hnae3_dev_dcb_supported(hdev)) {
1906                 u32 rx_all = hdev->pkt_buf_size;
1907
1908                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1909                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1910                         return -ENOMEM;
1911
1912                 return 0;
1913         }
1914
1915         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1916                 return 0;
1917
1918         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1919                 return 0;
1920
1921         /* try to decrease the buffer size */
1922         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1923                 return 0;
1924
1925         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1926                 return 0;
1927
1928         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1929                 return 0;
1930
1931         return -ENOMEM;
1932 }
1933
1934 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1935                                    struct hclge_pkt_buf_alloc *buf_alloc)
1936 {
1937         struct hclge_rx_priv_buff_cmd *req;
1938         struct hclge_desc desc;
1939         int ret;
1940         int i;
1941
1942         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1943         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1944
1945         /* Alloc private buffer TCs */
1946         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1948
1949                 req->buf_num[i] =
1950                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1951                 req->buf_num[i] |=
1952                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1953         }
1954
1955         req->shared_buf =
1956                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1957                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1958
1959         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1960         if (ret)
1961                 dev_err(&hdev->pdev->dev,
1962                         "rx private buffer alloc cmd failed %d\n", ret);
1963
1964         return ret;
1965 }
1966
1967 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1968                                    struct hclge_pkt_buf_alloc *buf_alloc)
1969 {
1970         struct hclge_rx_priv_wl_buf *req;
1971         struct hclge_priv_buf *priv;
1972         struct hclge_desc desc[2];
1973         int i, j;
1974         int ret;
1975
1976         for (i = 0; i < 2; i++) {
1977                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1978                                            false);
1979                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1980
1981                 /* The first descriptor sets the NEXT bit to 1 */
1982                 if (i == 0)
1983                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1984                 else
1985                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1986
1987                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1988                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1989
1990                         priv = &buf_alloc->priv_buf[idx];
1991                         req->tc_wl[j].high =
1992                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1993                         req->tc_wl[j].high |=
1994                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1995                         req->tc_wl[j].low =
1996                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1997                         req->tc_wl[j].low |=
1998                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1999                 }
2000         }
2001
2002         /* Send 2 descriptors at one time */
2003         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2004         if (ret)
2005                 dev_err(&hdev->pdev->dev,
2006                         "rx private waterline config cmd failed %d\n",
2007                         ret);
2008         return ret;
2009 }
2010
2011 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2012                                     struct hclge_pkt_buf_alloc *buf_alloc)
2013 {
2014         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2015         struct hclge_rx_com_thrd *req;
2016         struct hclge_desc desc[2];
2017         struct hclge_tc_thrd *tc;
2018         int i, j;
2019         int ret;
2020
2021         for (i = 0; i < 2; i++) {
2022                 hclge_cmd_setup_basic_desc(&desc[i],
2023                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2024                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2025
2026                 /* The first descriptor sets the NEXT bit to 1 */
2027                 if (i == 0)
2028                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2029                 else
2030                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2031
2032                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2033                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2034
2035                         req->com_thrd[j].high =
2036                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2037                         req->com_thrd[j].high |=
2038                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2039                         req->com_thrd[j].low =
2040                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2041                         req->com_thrd[j].low |=
2042                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2043                 }
2044         }
2045
2046         /* Send 2 descriptors at one time */
2047         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2048         if (ret)
2049                 dev_err(&hdev->pdev->dev,
2050                         "common threshold config cmd failed %d\n", ret);
2051         return ret;
2052 }
2053
2054 static int hclge_common_wl_config(struct hclge_dev *hdev,
2055                                   struct hclge_pkt_buf_alloc *buf_alloc)
2056 {
2057         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2058         struct hclge_rx_com_wl *req;
2059         struct hclge_desc desc;
2060         int ret;
2061
2062         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2063
2064         req = (struct hclge_rx_com_wl *)desc.data;
2065         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2066         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2067
2068         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2069         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2070
2071         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2072         if (ret)
2073                 dev_err(&hdev->pdev->dev,
2074                         "common waterline config cmd failed %d\n", ret);
2075
2076         return ret;
2077 }
2078
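/* Work out the tx/rx packet buffer split and program the private and shared
 * rx buffers, together with their waterlines and thresholds, into hardware.
 */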
2079 int hclge_buffer_alloc(struct hclge_dev *hdev)
2080 {
2081         struct hclge_pkt_buf_alloc *pkt_buf;
2082         int ret;
2083
2084         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2085         if (!pkt_buf)
2086                 return -ENOMEM;
2087
2088         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2089         if (ret) {
2090                 dev_err(&hdev->pdev->dev,
2091                         "could not calc tx buffer size for all TCs %d\n", ret);
2092                 goto out;
2093         }
2094
2095         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2096         if (ret) {
2097                 dev_err(&hdev->pdev->dev,
2098                         "could not alloc tx buffers %d\n", ret);
2099                 goto out;
2100         }
2101
2102         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2103         if (ret) {
2104                 dev_err(&hdev->pdev->dev,
2105                         "could not calc rx priv buffer size for all TCs %d\n",
2106                         ret);
2107                 goto out;
2108         }
2109
2110         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2111         if (ret) {
2112                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2113                         ret);
2114                 goto out;
2115         }
2116
2117         if (hnae3_dev_dcb_supported(hdev)) {
2118                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2119                 if (ret) {
2120                         dev_err(&hdev->pdev->dev,
2121                                 "could not configure rx private waterline %d\n",
2122                                 ret);
2123                         goto out;
2124                 }
2125
2126                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2127                 if (ret) {
2128                         dev_err(&hdev->pdev->dev,
2129                                 "could not configure common threshold %d\n",
2130                                 ret);
2131                         goto out;
2132                 }
2133         }
2134
2135         ret = hclge_common_wl_config(hdev, pkt_buf);
2136         if (ret)
2137                 dev_err(&hdev->pdev->dev,
2138                         "could not configure common waterline %d\n", ret);
2139
2140 out:
2141         kfree(pkt_buf);
2142         return ret;
2143 }
2144
2145 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2146 {
2147         struct hnae3_handle *roce = &vport->roce;
2148         struct hnae3_handle *nic = &vport->nic;
2149
2150         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2151
2152         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2153             vport->back->num_msi_left == 0)
2154                 return -EINVAL;
2155
2156         roce->rinfo.base_vector = vport->back->roce_base_vector;
2157
2158         roce->rinfo.netdev = nic->kinfo.netdev;
2159         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2160
2161         roce->pdev = nic->pdev;
2162         roce->ae_algo = nic->ae_algo;
2163         roce->numa_node_mask = nic->numa_node_mask;
2164
2165         return 0;
2166 }
2167
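/* Allocate MSI/MSI-X vectors for the PF and set up the vector_status and
 * vector_irq bookkeeping arrays.
 */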
2168 static int hclge_init_msi(struct hclge_dev *hdev)
2169 {
2170         struct pci_dev *pdev = hdev->pdev;
2171         int vectors;
2172         int i;
2173
2174         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2175                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2176         if (vectors < 0) {
2177                 dev_err(&pdev->dev,
2178                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2179                         vectors);
2180                 return vectors;
2181         }
2182         if (vectors < hdev->num_msi)
2183                 dev_warn(&hdev->pdev->dev,
2184                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2185                          hdev->num_msi, vectors);
2186
2187         hdev->num_msi = vectors;
2188         hdev->num_msi_left = vectors;
2189         hdev->base_msi_vector = pdev->irq;
2190         hdev->roce_base_vector = hdev->base_msi_vector +
2191                                 hdev->roce_base_msix_offset;
2192
2193         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2194                                            sizeof(u16), GFP_KERNEL);
2195         if (!hdev->vector_status) {
2196                 pci_free_irq_vectors(pdev);
2197                 return -ENOMEM;
2198         }
2199
2200         for (i = 0; i < hdev->num_msi; i++)
2201                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2202
2203         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2204                                         sizeof(int), GFP_KERNEL);
2205         if (!hdev->vector_irq) {
2206                 pci_free_irq_vectors(pdev);
2207                 return -ENOMEM;
2208         }
2209
2210         return 0;
2211 }
2212
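/* Half duplex is only valid for 10M/100M links; force full duplex for all
 * other speeds.
 */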
2213 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2214 {
2215         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2216                 duplex = HCLGE_MAC_FULL;
2217
2218         return duplex;
2219 }
2220
2221 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2222                                       u8 duplex)
2223 {
2224         struct hclge_config_mac_speed_dup_cmd *req;
2225         struct hclge_desc desc;
2226         int ret;
2227
2228         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2229
2230         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2231
2232         if (duplex)
2233                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2234
2235         switch (speed) {
2236         case HCLGE_MAC_SPEED_10M:
2237                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2238                                 HCLGE_CFG_SPEED_S, 6);
2239                 break;
2240         case HCLGE_MAC_SPEED_100M:
2241                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2242                                 HCLGE_CFG_SPEED_S, 7);
2243                 break;
2244         case HCLGE_MAC_SPEED_1G:
2245                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2246                                 HCLGE_CFG_SPEED_S, 0);
2247                 break;
2248         case HCLGE_MAC_SPEED_10G:
2249                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2250                                 HCLGE_CFG_SPEED_S, 1);
2251                 break;
2252         case HCLGE_MAC_SPEED_25G:
2253                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2254                                 HCLGE_CFG_SPEED_S, 2);
2255                 break;
2256         case HCLGE_MAC_SPEED_40G:
2257                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2258                                 HCLGE_CFG_SPEED_S, 3);
2259                 break;
2260         case HCLGE_MAC_SPEED_50G:
2261                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2262                                 HCLGE_CFG_SPEED_S, 4);
2263                 break;
2264         case HCLGE_MAC_SPEED_100G:
2265                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2266                                 HCLGE_CFG_SPEED_S, 5);
2267                 break;
2268         default:
2269                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2270                 return -EINVAL;
2271         }
2272
2273         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2274                       1);
2275
2276         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2277         if (ret) {
2278                 dev_err(&hdev->pdev->dev,
2279                         "mac speed/duplex config cmd failed %d.\n", ret);
2280                 return ret;
2281         }
2282
2283         return 0;
2284 }
2285
2286 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2287 {
2288         int ret;
2289
2290         duplex = hclge_check_speed_dup(duplex, speed);
2291         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2292                 return 0;
2293
2294         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2295         if (ret)
2296                 return ret;
2297
2298         hdev->hw.mac.speed = speed;
2299         hdev->hw.mac.duplex = duplex;
2300
2301         return 0;
2302 }
2303
2304 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2305                                      u8 duplex)
2306 {
2307         struct hclge_vport *vport = hclge_get_vport(handle);
2308         struct hclge_dev *hdev = vport->back;
2309
2310         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2311 }
2312
2313 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2314 {
2315         struct hclge_config_auto_neg_cmd *req;
2316         struct hclge_desc desc;
2317         u32 flag = 0;
2318         int ret;
2319
2320         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2321
2322         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2323         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2324         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2325
2326         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2327         if (ret)
2328                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2329                         ret);
2330
2331         return ret;
2332 }
2333
2334 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2335 {
2336         struct hclge_vport *vport = hclge_get_vport(handle);
2337         struct hclge_dev *hdev = vport->back;
2338
2339         if (!hdev->hw.mac.support_autoneg) {
2340                 if (enable) {
2341                         dev_err(&hdev->pdev->dev,
2342                                 "autoneg is not supported by current port\n");
2343                         return -EOPNOTSUPP;
2344                 } else {
2345                         return 0;
2346                 }
2347         }
2348
2349         return hclge_set_autoneg_en(hdev, enable);
2350 }
2351
2352 static int hclge_get_autoneg(struct hnae3_handle *handle)
2353 {
2354         struct hclge_vport *vport = hclge_get_vport(handle);
2355         struct hclge_dev *hdev = vport->back;
2356         struct phy_device *phydev = hdev->hw.mac.phydev;
2357
2358         if (phydev)
2359                 return phydev->autoneg;
2360
2361         return hdev->hw.mac.autoneg;
2362 }
2363
2364 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2365 {
2366         struct hclge_vport *vport = hclge_get_vport(handle);
2367         struct hclge_dev *hdev = vport->back;
2368         int ret;
2369
2370         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2371
2372         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2373         if (ret)
2374                 return ret;
2375         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2376 }
2377
2378 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2379 {
2380         struct hclge_vport *vport = hclge_get_vport(handle);
2381         struct hclge_dev *hdev = vport->back;
2382
2383         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2384                 return hclge_set_autoneg_en(hdev, !halt);
2385
2386         return 0;
2387 }
2388
2389 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2390 {
2391         struct hclge_config_fec_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2396
2397         req = (struct hclge_config_fec_cmd *)desc.data;
2398         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2399                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2400         if (fec_mode & BIT(HNAE3_FEC_RS))
2401                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2402                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2403         if (fec_mode & BIT(HNAE3_FEC_BASER))
2404                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2405                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2406
2407         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2408         if (ret)
2409                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2410
2411         return ret;
2412 }
2413
2414 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2415 {
2416         struct hclge_vport *vport = hclge_get_vport(handle);
2417         struct hclge_dev *hdev = vport->back;
2418         struct hclge_mac *mac = &hdev->hw.mac;
2419         int ret;
2420
2421         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2422                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2423                 return -EINVAL;
2424         }
2425
2426         ret = hclge_set_fec_hw(hdev, fec_mode);
2427         if (ret)
2428                 return ret;
2429
2430         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2431         return 0;
2432 }
2433
2434 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2435                           u8 *fec_mode)
2436 {
2437         struct hclge_vport *vport = hclge_get_vport(handle);
2438         struct hclge_dev *hdev = vport->back;
2439         struct hclge_mac *mac = &hdev->hw.mac;
2440
2441         if (fec_ability)
2442                 *fec_ability = mac->fec_ability;
2443         if (fec_mode)
2444                 *fec_mode = mac->fec_mode;
2445 }
2446
2447 static int hclge_mac_init(struct hclge_dev *hdev)
2448 {
2449         struct hclge_mac *mac = &hdev->hw.mac;
2450         int ret;
2451
2452         hdev->support_sfp_query = true;
2453         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2454         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2455                                          hdev->hw.mac.duplex);
2456         if (ret) {
2457                 dev_err(&hdev->pdev->dev,
2458                         "Config mac speed dup fail ret=%d\n", ret);
2459                 return ret;
2460         }
2461
2462         if (hdev->hw.mac.support_autoneg) {
2463                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2464                 if (ret) {
2465                         dev_err(&hdev->pdev->dev,
2466                                 "Config mac autoneg fail ret=%d\n", ret);
2467                         return ret;
2468                 }
2469         }
2470
2471         mac->link = 0;
2472
2473         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2474                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2475                 if (ret) {
2476                         dev_err(&hdev->pdev->dev,
2477                                 "Fec mode init fail, ret = %d\n", ret);
2478                         return ret;
2479                 }
2480         }
2481
2482         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2483         if (ret) {
2484                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2485                 return ret;
2486         }
2487
2488         ret = hclge_buffer_alloc(hdev);
2489         if (ret)
2490                 dev_err(&hdev->pdev->dev,
2491                         "allocate buffer fail, ret=%d\n", ret);
2492
2493         return ret;
2494 }
2495
2496 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2497 {
2498         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2499             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2500                 schedule_work(&hdev->mbx_service_task);
2501 }
2502
2503 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2504 {
2505         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2506             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2507                 schedule_work(&hdev->rst_service_task);
2508 }
2509
2510 static void hclge_task_schedule(struct hclge_dev *hdev)
2511 {
2512         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2513             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2514             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2515                 (void)schedule_work(&hdev->service_task);
2516 }
2517
2518 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2519 {
2520         struct hclge_link_status_cmd *req;
2521         struct hclge_desc desc;
2522         int link_status;
2523         int ret;
2524
2525         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2526         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2527         if (ret) {
2528                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2529                         ret);
2530                 return ret;
2531         }
2532
2533         req = (struct hclge_link_status_cmd *)desc.data;
2534         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2535
2536         return !!link_status;
2537 }
2538
2539 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2540 {
2541         unsigned int mac_state;
2542         int link_stat;
2543
2544         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2545                 return 0;
2546
2547         mac_state = hclge_get_mac_link_status(hdev);
2548
2549         if (hdev->hw.mac.phydev) {
2550                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2551                         link_stat = mac_state &
2552                                 hdev->hw.mac.phydev->link;
2553                 else
2554                         link_stat = 0;
2555
2556         } else {
2557                 link_stat = mac_state;
2558         }
2559
2560         return !!link_stat;
2561 }
2562
2563 static void hclge_update_link_status(struct hclge_dev *hdev)
2564 {
2565         struct hnae3_client *rclient = hdev->roce_client;
2566         struct hnae3_client *client = hdev->nic_client;
2567         struct hnae3_handle *rhandle;
2568         struct hnae3_handle *handle;
2569         int state;
2570         int i;
2571
2572         if (!client)
2573                 return;
2574         state = hclge_get_mac_phy_link(hdev);
2575         if (state != hdev->hw.mac.link) {
2576                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2577                         handle = &hdev->vport[i].nic;
2578                         client->ops->link_status_change(handle, state);
2579                         hclge_config_mac_tnl_int(hdev, state);
2580                         rhandle = &hdev->vport[i].roce;
2581                         if (rclient && rclient->ops->link_status_change)
2582                                 rclient->ops->link_status_change(rhandle,
2583                                                                  state);
2584                 }
2585                 hdev->hw.mac.link = state;
2586         }
2587 }
2588
2589 static void hclge_update_port_capability(struct hclge_mac *mac)
2590 {
2591         /* update fec ability by speed */
2592         hclge_convert_setting_fec(mac);
2593
2594         /* firmware cannot identify the backplane type, so the media type
2595          * read from the configuration helps to handle it
2596          */
2597         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2598             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2599                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2600         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2601                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2602
2603         if (mac->support_autoneg) {
2604                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2605                 linkmode_copy(mac->advertising, mac->supported);
2606         } else {
2607                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2608                                    mac->supported);
2609                 linkmode_zero(mac->advertising);
2610         }
2611 }
2612
2613 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2614 {
2615         struct hclge_sfp_info_cmd *resp;
2616         struct hclge_desc desc;
2617         int ret;
2618
2619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2620         resp = (struct hclge_sfp_info_cmd *)desc.data;
2621         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2622         if (ret == -EOPNOTSUPP) {
2623                 dev_warn(&hdev->pdev->dev,
2624                          "IMP does not support getting SFP speed %d\n", ret);
2625                 return ret;
2626         } else if (ret) {
2627                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2628                 return ret;
2629         }
2630
2631         *speed = le32_to_cpu(resp->speed);
2632
2633         return 0;
2634 }
2635
2636 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2637 {
2638         struct hclge_sfp_info_cmd *resp;
2639         struct hclge_desc desc;
2640         int ret;
2641
2642         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2643         resp = (struct hclge_sfp_info_cmd *)desc.data;
2644
2645         resp->query_type = QUERY_ACTIVE_SPEED;
2646
2647         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2648         if (ret == -EOPNOTSUPP) {
2649                 dev_warn(&hdev->pdev->dev,
2650                          "IMP does not support getting SFP info %d\n", ret);
2651                 return ret;
2652         } else if (ret) {
2653                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2654                 return ret;
2655         }
2656
2657         mac->speed = le32_to_cpu(resp->speed);
2658         /* if resp->speed_ability is 0, the firmware is an old version,
2659          * so do not update these params
2660          */
2661         if (resp->speed_ability) {
2662                 mac->module_type = le32_to_cpu(resp->module_type);
2663                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2664                 mac->autoneg = resp->autoneg;
2665                 mac->support_autoneg = resp->autoneg_ability;
2666                 if (!resp->active_fec)
2667                         mac->fec_mode = 0;
2668                 else
2669                         mac->fec_mode = BIT(resp->active_fec);
2670         } else {
2671                 mac->speed_type = QUERY_SFP_SPEED;
2672         }
2673
2674         return 0;
2675 }
2676
2677 static int hclge_update_port_info(struct hclge_dev *hdev)
2678 {
2679         struct hclge_mac *mac = &hdev->hw.mac;
2680         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2681         int ret;
2682
2683         /* get the port info from SFP cmd if not copper port */
2684         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2685                 return 0;
2686
2687         /* if IMP does not support getting SFP/qSFP info, return directly */
2688         if (!hdev->support_sfp_query)
2689                 return 0;
2690
2691         if (hdev->pdev->revision >= 0x21)
2692                 ret = hclge_get_sfp_info(hdev, mac);
2693         else
2694                 ret = hclge_get_sfp_speed(hdev, &speed);
2695
2696         if (ret == -EOPNOTSUPP) {
2697                 hdev->support_sfp_query = false;
2698                 return ret;
2699         } else if (ret) {
2700                 return ret;
2701         }
2702
2703         if (hdev->pdev->revision >= 0x21) {
2704                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2705                         hclge_update_port_capability(mac);
2706                         return 0;
2707                 }
2708                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2709                                                HCLGE_MAC_FULL);
2710         } else {
2711                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2712                         return 0; /* do nothing if no SFP */
2713
2714                 /* must config full duplex for SFP */
2715                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2716         }
2717 }
2718
2719 static int hclge_get_status(struct hnae3_handle *handle)
2720 {
2721         struct hclge_vport *vport = hclge_get_vport(handle);
2722         struct hclge_dev *hdev = vport->back;
2723
2724         hclge_update_link_status(hdev);
2725
2726         return hdev->hw.mac.link;
2727 }
2728
2729 static void hclge_service_timer(struct timer_list *t)
2730 {
2731         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2732
2733         mod_timer(&hdev->service_timer, jiffies + HZ);
2734         hdev->hw_stats.stats_timer++;
2735         hdev->fd_arfs_expire_timer++;
2736         hclge_task_schedule(hdev);
2737 }
2738
2739 static void hclge_service_complete(struct hclge_dev *hdev)
2740 {
2741         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2742
2743         /* Flush memory before next watchdog */
2744         smp_mb__before_atomic();
2745         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2746 }
2747
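/* Decode the vector0 interrupt source: reset events are reported first, then
 * MSI-X hardware errors, then mailbox (CMDQ RX) events; @clearval returns the
 * status bits the caller must clear for reset and mailbox events.
 */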
2748 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2749 {
2750         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2751
2752         /* fetch the events from their corresponding regs */
2753         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2754         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2755         msix_src_reg = hclge_read_dev(&hdev->hw,
2756                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2757
2758         /* Assumption: if reset and mailbox events are reported together,
2759          * we only process the reset event in this round and defer the
2760          * processing of the mailbox events. Since we have not cleared the
2761          * RX CMDQ event this time, we will receive another interrupt from
2762          * H/W just for the mailbox.
2763          */
2764
2765         /* check for vector0 reset event sources */
2766         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2767                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2768                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2769                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2770                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2771                 hdev->rst_stats.imp_rst_cnt++;
2772                 return HCLGE_VECTOR0_EVENT_RST;
2773         }
2774
2775         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2776                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2777                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2778                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2779                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2780                 hdev->rst_stats.global_rst_cnt++;
2781                 return HCLGE_VECTOR0_EVENT_RST;
2782         }
2783
2784         /* check for vector0 msix event source */
2785         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2786                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2787                         msix_src_reg);
2788                 return HCLGE_VECTOR0_EVENT_ERR;
2789         }
2790
2791         /* check for vector0 mailbox(=CMDQ RX) event source */
2792         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2793                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2794                 *clearval = cmdq_src_reg;
2795                 return HCLGE_VECTOR0_EVENT_MBX;
2796         }
2797
2798         /* print other vector0 event source */
2799         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2800                 cmdq_src_reg, msix_src_reg);
2801         return HCLGE_VECTOR0_EVENT_OTHER;
2802 }
2803
2804 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2805                                     u32 regclr)
2806 {
2807         switch (event_type) {
2808         case HCLGE_VECTOR0_EVENT_RST:
2809                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2810                 break;
2811         case HCLGE_VECTOR0_EVENT_MBX:
2812                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2813                 break;
2814         default:
2815                 break;
2816         }
2817 }
2818
2819 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2820 {
2821         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2822                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2823                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2824                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2825         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2826 }
2827
2828 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2829 {
2830         writel(enable ? 1 : 0, vector->addr);
2831 }
2832
2833 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2834 {
2835         struct hclge_dev *hdev = data;
2836         u32 clearval = 0;
2837         u32 event_cause;
2838
2839         hclge_enable_vector(&hdev->misc_vector, false);
2840         event_cause = hclge_check_event_cause(hdev, &clearval);
2841
2842         /* vector 0 interrupt is shared with reset and mailbox source events. */
2843         switch (event_cause) {
2844         case HCLGE_VECTOR0_EVENT_ERR:
2845                 /* we do not know what type of reset is required now. This can
2846                  * only be decided after we fetch the type of errors which
2847                  * caused this event. Therefore, do the following for now:
2848                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2849                  *    actual reset type to be used is deferred.
2850                  * 2. Schedule the reset service task.
2851                  * 3. When the service task sees HNAE3_UNKNOWN_RESET, it will
2852                  *    fetch the correct type of reset by first decoding the
2853                  *    types of errors.
2854                  */
2855                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2856                 /* fall through */
2857         case HCLGE_VECTOR0_EVENT_RST:
2858                 hclge_reset_task_schedule(hdev);
2859                 break;
2860         case HCLGE_VECTOR0_EVENT_MBX:
2861                 /* If we are here, then either:
2862                  * 1. we are not handling any mbx task and no mbx task is
2863                  *    scheduled,
2864                  *                        or
2865                  * 2. we are handling a mbx task but nothing more is
2866                  *    scheduled.
2867                  * In both cases we should schedule the mbx task, as this
2868                  * interrupt indicates that more mbx messages are pending.
2869                  */
2870                 hclge_mbx_task_schedule(hdev);
2871                 break;
2872         default:
2873                 dev_warn(&hdev->pdev->dev,
2874                          "received unknown or unhandled event of vector0\n");
2875                 break;
2876         }
2877
2878         /* clear the source of the interrupt if it is not caused by reset */
2879         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2880                 hclge_clear_event_cause(hdev, event_cause, clearval);
2881                 hclge_enable_vector(&hdev->misc_vector, true);
2882         }
2883
2884         return IRQ_HANDLED;
2885 }
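
/* Note on vector re-enabling: the handler above re-enables the misc vector
 * only for the mailbox case. For reset events the vector stays disabled until
 * hclge_clear_reset_cause() re-enables it, and for the deferred-error case
 * (HNAE3_UNKNOWN_RESET) it is re-enabled from hclge_get_reset_level() once
 * the error has been handled and cleared in hardware.
 */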
2886
2887 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2888 {
2889         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2890                 dev_warn(&hdev->pdev->dev,
2891                          "vector(vector_id %d) has been freed.\n", vector_id);
2892                 return;
2893         }
2894
2895         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2896         hdev->num_msi_left += 1;
2897         hdev->num_msi_used -= 1;
2898 }
2899
2900 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2901 {
2902         struct hclge_misc_vector *vector = &hdev->misc_vector;
2903
2904         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2905
2906         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2907         hdev->vector_status[0] = 0;
2908
2909         hdev->num_msi_left -= 1;
2910         hdev->num_msi_used += 1;
2911 }
2912
2913 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2914 {
2915         int ret;
2916
2917         hclge_get_misc_vector(hdev);
2918
2919         /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2920         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2921                           0, "hclge_misc", hdev);
2922         if (ret) {
2923                 hclge_free_vector(hdev, 0);
2924                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2925                         hdev->misc_vector.vector_irq);
2926         }
2927
2928         return ret;
2929 }
2930
2931 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2932 {
2933         free_irq(hdev->misc_vector.vector_irq, hdev);
2934         hclge_free_vector(hdev, 0);
2935 }
2936
2937 int hclge_notify_client(struct hclge_dev *hdev,
2938                         enum hnae3_reset_notify_type type)
2939 {
2940         struct hnae3_client *client = hdev->nic_client;
2941         u16 i;
2942
2943         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2944                 return 0;
2945
2946         if (!client->ops->reset_notify)
2947                 return -EOPNOTSUPP;
2948
2949         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2950                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2951                 int ret;
2952
2953                 ret = client->ops->reset_notify(handle, type);
2954                 if (ret) {
2955                         dev_err(&hdev->pdev->dev,
2956                                 "notify nic client failed %d(%d)\n", type, ret);
2957                         return ret;
2958                 }
2959         }
2960
2961         return 0;
2962 }
2963
2964 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2965                                     enum hnae3_reset_notify_type type)
2966 {
2967         struct hnae3_client *client = hdev->roce_client;
2968         int ret = 0;
2969         u16 i;
2970
2971         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2972                 return 0;
2973
2974         if (!client->ops->reset_notify)
2975                 return -EOPNOTSUPP;
2976
2977         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2978                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2979
2980                 ret = client->ops->reset_notify(handle, type);
2981                 if (ret) {
2982                         dev_err(&hdev->pdev->dev,
2983                                 "notify roce client failed %d(%d)",
2984                                 type, ret);
2985                         return ret;
2986                 }
2987         }
2988
2989         return ret;
2990 }
2991
2992 static int hclge_reset_wait(struct hclge_dev *hdev)
2993 {
2994 #define HCLGE_RESET_WATI_MS     100
2995 #define HCLGE_RESET_WAIT_CNT    200
2996         u32 val, reg, reg_bit;
2997         u32 cnt = 0;
2998
2999         switch (hdev->reset_type) {
3000         case HNAE3_IMP_RESET:
3001                 reg = HCLGE_GLOBAL_RESET_REG;
3002                 reg_bit = HCLGE_IMP_RESET_BIT;
3003                 break;
3004         case HNAE3_GLOBAL_RESET:
3005                 reg = HCLGE_GLOBAL_RESET_REG;
3006                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3007                 break;
3008         case HNAE3_FUNC_RESET:
3009                 reg = HCLGE_FUN_RST_ING;
3010                 reg_bit = HCLGE_FUN_RST_ING_B;
3011                 break;
3012         case HNAE3_FLR_RESET:
3013                 break;
3014         default:
3015                 dev_err(&hdev->pdev->dev,
3016                         "Wait for unsupported reset type: %d\n",
3017                         hdev->reset_type);
3018                 return -EINVAL;
3019         }
3020
3021         if (hdev->reset_type == HNAE3_FLR_RESET) {
3022                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3023                        cnt++ < HCLGE_RESET_WAIT_CNT)
3024                         msleep(HCLGE_RESET_WATI_MS);
3025
3026                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3027                         dev_err(&hdev->pdev->dev,
3028                                 "flr wait timeout: %d\n", cnt);
3029                         return -EBUSY;
3030                 }
3031
3032                 return 0;
3033         }
3034
3035         val = hclge_read_dev(&hdev->hw, reg);
3036         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3037                 msleep(HCLGE_RESET_WATI_MS);
3038                 val = hclge_read_dev(&hdev->hw, reg);
3039                 cnt++;
3040         }
3041
3042         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3043                 dev_warn(&hdev->pdev->dev,
3044                          "Wait for reset timeout: %d\n", hdev->reset_type);
3045                 return -EBUSY;
3046         }
3047
3048         return 0;
3049 }
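
/* Worked example of the wait budget above: the polling loop sleeps
 * HCLGE_RESET_WATI_MS (100 ms) per iteration for at most HCLGE_RESET_WAIT_CNT
 * (200) iterations, i.e. roughly 100 ms * 200 = 20 s before -EBUSY is
 * returned. The FLR path uses the same budget while waiting for the
 * HNAE3_FLR_DONE bit to be set.
 */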
3050
3051 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3052 {
3053         struct hclge_vf_rst_cmd *req;
3054         struct hclge_desc desc;
3055
3056         req = (struct hclge_vf_rst_cmd *)desc.data;
3057         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3058         req->dest_vfid = func_id;
3059
3060         if (reset)
3061                 req->vf_rst = 0x1;
3062
3063         return hclge_cmd_send(&hdev->hw, &desc, 1);
3064 }
3065
3066 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3067 {
3068         int i;
3069
3070         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3071                 struct hclge_vport *vport = &hdev->vport[i];
3072                 int ret;
3073
3074                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3075                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3076                 if (ret) {
3077                         dev_err(&hdev->pdev->dev,
3078                                 "set vf(%d) rst failed %d!\n",
3079                                 vport->vport_id, ret);
3080                         return ret;
3081                 }
3082
3083                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3084                         continue;
3085
3086                 /* Inform VF to process the reset.
3087                  * hclge_inform_reset_assert_to_vf may fail if VF
3088                  * driver is not loaded.
3089                  */
3090                 ret = hclge_inform_reset_assert_to_vf(vport);
3091                 if (ret)
3092                         dev_warn(&hdev->pdev->dev,
3093                                  "inform reset to vf(%d) failed %d!\n",
3094                                  vport->vport_id, ret);
3095         }
3096
3097         return 0;
3098 }
3099
3100 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3101 {
3102         struct hclge_desc desc;
3103         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3104         int ret;
3105
3106         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3107         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3108         req->fun_reset_vfid = func_id;
3109
3110         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3111         if (ret)
3112                 dev_err(&hdev->pdev->dev,
3113                         "send function reset cmd fail, status =%d\n", ret);
3114
3115         return ret;
3116 }
3117
3118 static void hclge_do_reset(struct hclge_dev *hdev)
3119 {
3120         struct hnae3_handle *handle = &hdev->vport[0].nic;
3121         struct pci_dev *pdev = hdev->pdev;
3122         u32 val;
3123
3124         if (hclge_get_hw_reset_stat(handle)) {
3125                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3126                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3127                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3128                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3129                 return;
3130         }
3131
3132         switch (hdev->reset_type) {
3133         case HNAE3_GLOBAL_RESET:
3134                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3135                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3136                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3137                 dev_info(&pdev->dev, "Global Reset requested\n");
3138                 break;
3139         case HNAE3_FUNC_RESET:
3140                 dev_info(&pdev->dev, "PF Reset requested\n");
3141                 /* schedule again to check later */
3142                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3143                 hclge_reset_task_schedule(hdev);
3144                 break;
3145         case HNAE3_FLR_RESET:
3146                 dev_info(&pdev->dev, "FLR requested\n");
3147                 /* schedule again to check later */
3148                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3149                 hclge_reset_task_schedule(hdev);
3150                 break;
3151         default:
3152                 dev_warn(&pdev->dev,
3153                          "Unsupported reset type: %d\n", hdev->reset_type);
3154                 break;
3155         }
3156 }
3157
3158 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3159                                                    unsigned long *addr)
3160 {
3161         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3162         struct hclge_dev *hdev = ae_dev->priv;
3163
3164         /* first, resolve any unknown reset type to the known type(s) */
3165         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3166                 /* we will intentionally ignore any errors from this function
3167                  * as we will end up in *some* reset request in any case
3168                  */
3169                 hclge_handle_hw_msix_error(hdev, addr);
3170                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3171                 /* We deferred clearing the error event which caused the
3172                  * interrupt, since it was not possible to do that in
3173                  * interrupt context (this is why the new UNKNOWN reset type
3174                  * was introduced). Now that the errors have been handled and
3175                  * cleared in hardware, we can safely re-enable interrupts.
3176                  * This is an exception to the norm.
3177                  */
3178                 hclge_enable_vector(&hdev->misc_vector, true);
3179         }
3180
3181         /* return the highest priority reset level amongst all */
3182         if (test_bit(HNAE3_IMP_RESET, addr)) {
3183                 rst_level = HNAE3_IMP_RESET;
3184                 clear_bit(HNAE3_IMP_RESET, addr);
3185                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3186                 clear_bit(HNAE3_FUNC_RESET, addr);
3187         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3188                 rst_level = HNAE3_GLOBAL_RESET;
3189                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3190                 clear_bit(HNAE3_FUNC_RESET, addr);
3191         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3192                 rst_level = HNAE3_FUNC_RESET;
3193                 clear_bit(HNAE3_FUNC_RESET, addr);
3194         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3195                 rst_level = HNAE3_FLR_RESET;
3196                 clear_bit(HNAE3_FLR_RESET, addr);
3197         }
3198
3199         if (hdev->reset_type != HNAE3_NONE_RESET &&
3200             rst_level < hdev->reset_type)
3201                 return HNAE3_NONE_RESET;
3202
3203         return rst_level;
3204 }
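
/* Illustration of the priority resolution above: if both HNAE3_GLOBAL_RESET
 * and HNAE3_FUNC_RESET are pending in @addr, HNAE3_GLOBAL_RESET is returned
 * and both bits are cleared, since the higher-level reset also covers the
 * function-level one. If a reset of type hdev->reset_type is already in
 * progress and the pending level is lower, HNAE3_NONE_RESET is returned so
 * the lower-level request is not processed mid-reset.
 */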
3205
3206 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3207 {
3208         u32 clearval = 0;
3209
3210         switch (hdev->reset_type) {
3211         case HNAE3_IMP_RESET:
3212                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3213                 break;
3214         case HNAE3_GLOBAL_RESET:
3215                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3216                 break;
3217         default:
3218                 break;
3219         }
3220
3221         if (!clearval)
3222                 return;
3223
3224         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3225         hclge_enable_vector(&hdev->misc_vector, true);
3226 }
3227
3228 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3229 {
3230         int ret = 0;
3231
3232         switch (hdev->reset_type) {
3233         case HNAE3_FUNC_RESET:
3234                 /* fall through */
3235         case HNAE3_FLR_RESET:
3236                 ret = hclge_set_all_vf_rst(hdev, true);
3237                 break;
3238         default:
3239                 break;
3240         }
3241
3242         return ret;
3243 }
3244
3245 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3246 {
3247 #define HCLGE_RESET_SYNC_TIME 100
3248
3249         u32 reg_val;
3250         int ret = 0;
3251
3252         switch (hdev->reset_type) {
3253         case HNAE3_FUNC_RESET:
3254                 /* There is no mechanism for the PF to know if the VF has
3255                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3256                  */
3257                 msleep(HCLGE_RESET_SYNC_TIME);
3258                 ret = hclge_func_reset_cmd(hdev, 0);
3259                 if (ret) {
3260                         dev_err(&hdev->pdev->dev,
3261                                 "asserting function reset fail %d!\n", ret);
3262                         return ret;
3263                 }
3264
3265                 /* After performing PF reset, it is not necessary to do any
3266                  * mailbox handling or send any command to firmware, because
3267                  * any mailbox handling or command to firmware is only valid
3268                  * after hclge_cmd_init is called.
3269                  */
3270                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3271                 hdev->rst_stats.pf_rst_cnt++;
3272                 break;
3273         case HNAE3_FLR_RESET:
3274                 /* There is no mechanism for the PF to know if the VF has
3275                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3276                  */
3277                 msleep(HCLGE_RESET_SYNC_TIME);
3278                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3279                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3280                 hdev->rst_stats.flr_rst_cnt++;
3281                 break;
3282         case HNAE3_IMP_RESET:
3283                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3284                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3285                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3286                 break;
3287         default:
3288                 break;
3289         }
3290
3291         /* inform hardware that preparatory work is done */
3292         msleep(HCLGE_RESET_SYNC_TIME);
3293         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3294                         HCLGE_NIC_CMQ_ENABLE);
3295         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3296
3297         return ret;
3298 }
3299
3300 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3301 {
3302 #define MAX_RESET_FAIL_CNT 5
3303
3304         if (hdev->reset_pending) {
3305                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3306                          hdev->reset_pending);
3307                 return true;
3308         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3309                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3310                     BIT(HCLGE_IMP_RESET_BIT))) {
3311                 dev_info(&hdev->pdev->dev,
3312                          "reset failed because IMP Reset is pending\n");
3313                 hclge_clear_reset_cause(hdev);
3314                 return false;
3315         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3316                 hdev->reset_fail_cnt++;
3317                 if (is_timeout) {
3318                         set_bit(hdev->reset_type, &hdev->reset_pending);
3319                         dev_info(&hdev->pdev->dev,
3320                                  "re-schedule to wait for hw reset done\n");
3321                         return true;
3322                 }
3323
3324                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3325                 hclge_clear_reset_cause(hdev);
3326                 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3327                 mod_timer(&hdev->reset_timer,
3328                           jiffies + HCLGE_RESET_INTERVAL);
3329
3330                 return false;
3331         }
3332
3333         hclge_clear_reset_cause(hdev);
3334         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3335         return false;
3336 }
3337
3338 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3339 {
3340         int ret = 0;
3341
3342         switch (hdev->reset_type) {
3343         case HNAE3_FUNC_RESET:
3344                 /* fall through */
3345         case HNAE3_FLR_RESET:
3346                 ret = hclge_set_all_vf_rst(hdev, false);
3347                 break;
3348         default:
3349                 break;
3350         }
3351
3352         return ret;
3353 }
3354
3355 static int hclge_reset_stack(struct hclge_dev *hdev)
3356 {
3357         int ret;
3358
3359         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3360         if (ret)
3361                 return ret;
3362
3363         ret = hclge_reset_ae_dev(hdev->ae_dev);
3364         if (ret)
3365                 return ret;
3366
3367         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3368         if (ret)
3369                 return ret;
3370
3371         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3372 }
3373
3374 static void hclge_reset(struct hclge_dev *hdev)
3375 {
3376         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3377         bool is_timeout = false;
3378         int ret;
3379
3380         /* Initialize ae_dev reset status as well, in case enet layer wants to
3381          * know if device is undergoing reset
3382          */
3383         ae_dev->reset_type = hdev->reset_type;
3384         hdev->rst_stats.reset_cnt++;
3385         /* perform reset of the stack & ae device for a client */
3386         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3387         if (ret)
3388                 goto err_reset;
3389
3390         ret = hclge_reset_prepare_down(hdev);
3391         if (ret)
3392                 goto err_reset;
3393
3394         rtnl_lock();
3395         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3396         if (ret)
3397                 goto err_reset_lock;
3398
3399         rtnl_unlock();
3400
3401         ret = hclge_reset_prepare_wait(hdev);
3402         if (ret)
3403                 goto err_reset;
3404
3405         if (hclge_reset_wait(hdev)) {
3406                 is_timeout = true;
3407                 goto err_reset;
3408         }
3409
3410         hdev->rst_stats.hw_reset_done_cnt++;
3411
3412         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3413         if (ret)
3414                 goto err_reset;
3415
3416         rtnl_lock();
3417
3418         ret = hclge_reset_stack(hdev);
3419         if (ret)
3420                 goto err_reset_lock;
3421
3422         hclge_clear_reset_cause(hdev);
3423
3424         ret = hclge_reset_prepare_up(hdev);
3425         if (ret)
3426                 goto err_reset_lock;
3427
3428         rtnl_unlock();
3429
3430         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3431         /* ignore the RoCE notify error once the reset has already failed
3432          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3433          */
3434         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3435                 goto err_reset;
3436
3437         rtnl_lock();
3438
3439         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3440         if (ret)
3441                 goto err_reset_lock;
3442
3443         rtnl_unlock();
3444
3445         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3446         if (ret)
3447                 goto err_reset;
3448
3449         hdev->last_reset_time = jiffies;
3450         hdev->reset_fail_cnt = 0;
3451         hdev->rst_stats.reset_done_cnt++;
3452         ae_dev->reset_type = HNAE3_NONE_RESET;
3453         del_timer(&hdev->reset_timer);
3454
3455         return;
3456
3457 err_reset_lock:
3458         rtnl_unlock();
3459 err_reset:
3460         if (hclge_reset_err_handle(hdev, is_timeout))
3461                 hclge_reset_task_schedule(hdev);
3462 }
3463
3464 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3465 {
3466         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3467         struct hclge_dev *hdev = ae_dev->priv;
3468
3469         /* We might end up getting called broadly because of the 2 cases below:
3470          * 1. A recoverable error was conveyed through APEI and the only way
3471          *    to restore normalcy is to reset.
3472          * 2. A new reset request from the stack due to a timeout.
3473          *
3474          * For the first case, the error event might not have an ae handle
3475          * available. Check whether this is a new reset request and we are not
3476          * here just because the last reset attempt did not succeed and the
3477          * watchdog hit us again. We know this if the last reset request did
3478          * not occur very recently (watchdog timer = 5*HZ, so check after a
3479          * sufficiently large time, say 4*5*HZ). For a new request we reset
3480          * the "reset level" to PF reset. If it is a repeat of the most recent
3481          * reset request, we want to make sure we throttle it; therefore, we
3482          * will not allow it again before 3*HZ has elapsed.
3483          */
3484         if (!handle)
3485                 handle = &hdev->vport[0].nic;
3486
3487         if (time_before(jiffies, (hdev->last_reset_time +
3488                                   HCLGE_RESET_INTERVAL)))
3489                 return;
3490         else if (hdev->default_reset_request)
3491                 hdev->reset_level =
3492                         hclge_get_reset_level(ae_dev,
3493                                               &hdev->default_reset_request);
3494         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3495                 hdev->reset_level = HNAE3_FUNC_RESET;
3496
3497         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3498                  hdev->reset_level);
3499
3500         /* request reset & schedule reset task */
3501         set_bit(hdev->reset_level, &hdev->reset_request);
3502         hclge_reset_task_schedule(hdev);
3503
3504         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3505                 hdev->reset_level++;
3506 }
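
/* Illustration of the throttling above: a request arriving within
 * HCLGE_RESET_INTERVAL of the last reset is silently dropped; a genuinely new
 * request (more than 4 * 5 * HZ after the last reset) starts at
 * HNAE3_FUNC_RESET, and each request that gets past the throttle bumps
 * reset_level one step towards HNAE3_GLOBAL_RESET so that repeated failures
 * escalate the reset.
 */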
3507
3508 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3509                                         enum hnae3_reset_type rst_type)
3510 {
3511         struct hclge_dev *hdev = ae_dev->priv;
3512
3513         set_bit(rst_type, &hdev->default_reset_request);
3514 }
3515
3516 static void hclge_reset_timer(struct timer_list *t)
3517 {
3518         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3519
3520         dev_info(&hdev->pdev->dev,
3521                  "triggering reset in reset timer\n");
3522         hclge_reset_event(hdev->pdev, NULL);
3523 }
3524
3525 static void hclge_reset_subtask(struct hclge_dev *hdev)
3526 {
3527         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3528
3529         /* check if there is any ongoing reset in the hardware. This status can
3530          * be checked from reset_pending. If there is, we need to wait for the
3531          * hardware to complete the reset.
3532          *    a. If we are able to figure out in a reasonable time that the
3533          *       hardware has fully reset, we can proceed with the driver and
3534          *       client reset.
3535          *    b. Otherwise, we can come back later to check this status, so
3536          *       re-schedule now.
3537          */
3538         hdev->last_reset_time = jiffies;
3539         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3540         if (hdev->reset_type != HNAE3_NONE_RESET)
3541                 hclge_reset(hdev);
3542
3543         /* check if we got any *new* reset requests to be honored */
3544         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3545         if (hdev->reset_type != HNAE3_NONE_RESET)
3546                 hclge_do_reset(hdev);
3547
3548         hdev->reset_type = HNAE3_NONE_RESET;
3549 }
3550
3551 static void hclge_reset_service_task(struct work_struct *work)
3552 {
3553         struct hclge_dev *hdev =
3554                 container_of(work, struct hclge_dev, rst_service_task);
3555
3556         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3557                 return;
3558
3559         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3560
3561         hclge_reset_subtask(hdev);
3562
3563         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3564 }
3565
3566 static void hclge_mailbox_service_task(struct work_struct *work)
3567 {
3568         struct hclge_dev *hdev =
3569                 container_of(work, struct hclge_dev, mbx_service_task);
3570
3571         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3572                 return;
3573
3574         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3575
3576         hclge_mbx_handler(hdev);
3577
3578         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3579 }
3580
3581 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3582 {
3583         int i;
3584
3585         /* start from vport 1, since the PF (vport 0) is always alive */
3586         for (i = 1; i < hdev->num_alloc_vport; i++) {
3587                 struct hclge_vport *vport = &hdev->vport[i];
3588
3589                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3590                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3591
3592                 /* If the VF is not alive, set its mps to the default value */
3593                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3594                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3595         }
3596 }
3597
3598 static void hclge_service_task(struct work_struct *work)
3599 {
3600         struct hclge_dev *hdev =
3601                 container_of(work, struct hclge_dev, service_task);
3602
3603         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3604                 hclge_update_stats_for_all(hdev);
3605                 hdev->hw_stats.stats_timer = 0;
3606         }
3607
3608         hclge_update_port_info(hdev);
3609         hclge_update_link_status(hdev);
3610         hclge_update_vport_alive(hdev);
3611         hclge_sync_vlan_filter(hdev);
3612         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3613                 hclge_rfs_filter_expire(hdev);
3614                 hdev->fd_arfs_expire_timer = 0;
3615         }
3616         hclge_service_complete(hdev);
3617 }
3618
3619 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3620 {
3621         /* VF handle has no client */
3622         if (!handle->client)
3623                 return container_of(handle, struct hclge_vport, nic);
3624         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3625                 return container_of(handle, struct hclge_vport, roce);
3626         else
3627                 return container_of(handle, struct hclge_vport, nic);
3628 }
3629
3630 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3631                             struct hnae3_vector_info *vector_info)
3632 {
3633         struct hclge_vport *vport = hclge_get_vport(handle);
3634         struct hnae3_vector_info *vector = vector_info;
3635         struct hclge_dev *hdev = vport->back;
3636         int alloc = 0;
3637         int i, j;
3638
3639         vector_num = min(hdev->num_msi_left, vector_num);
3640
3641         for (j = 0; j < vector_num; j++) {
3642                 for (i = 1; i < hdev->num_msi; i++) {
3643                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3644                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3645                                 vector->io_addr = hdev->hw.io_base +
3646                                         HCLGE_VECTOR_REG_BASE +
3647                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3648                                         vport->vport_id *
3649                                         HCLGE_VECTOR_VF_OFFSET;
3650                                 hdev->vector_status[i] = vport->vport_id;
3651                                 hdev->vector_irq[i] = vector->vector;
3652
3653                                 vector++;
3654                                 alloc++;
3655
3656                                 break;
3657                         }
3658                 }
3659         }
3660         hdev->num_msi_left -= alloc;
3661         hdev->num_msi_used += alloc;
3662
3663         return alloc;
3664 }
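
/* Example of the io_addr computation above: for the first data vector (i = 1)
 * of vport 0, the address is simply io_base + HCLGE_VECTOR_REG_BASE; each
 * further vector adds HCLGE_VECTOR_REG_OFFSET and each further vport adds
 * HCLGE_VECTOR_VF_OFFSET. Vector 0 is reserved for the misc interrupt, which
 * is why the search starts at i = 1.
 */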
3665
3666 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3667 {
3668         int i;
3669
3670         for (i = 0; i < hdev->num_msi; i++)
3671                 if (vector == hdev->vector_irq[i])
3672                         return i;
3673
3674         return -EINVAL;
3675 }
3676
3677 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3678 {
3679         struct hclge_vport *vport = hclge_get_vport(handle);
3680         struct hclge_dev *hdev = vport->back;
3681         int vector_id;
3682
3683         vector_id = hclge_get_vector_index(hdev, vector);
3684         if (vector_id < 0) {
3685                 dev_err(&hdev->pdev->dev,
3686                         "Get vector index fail. vector_id =%d\n", vector_id);
3687                 return vector_id;
3688         }
3689
3690         hclge_free_vector(hdev, vector_id);
3691
3692         return 0;
3693 }
3694
3695 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3696 {
3697         return HCLGE_RSS_KEY_SIZE;
3698 }
3699
3700 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3701 {
3702         return HCLGE_RSS_IND_TBL_SIZE;
3703 }
3704
3705 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3706                                   const u8 hfunc, const u8 *key)
3707 {
3708         struct hclge_rss_config_cmd *req;
3709         unsigned int key_offset = 0;
3710         struct hclge_desc desc;
3711         int key_counts;
3712         int key_size;
3713         int ret;
3714
3715         key_counts = HCLGE_RSS_KEY_SIZE;
3716         req = (struct hclge_rss_config_cmd *)desc.data;
3717
3718         while (key_counts) {
3719                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3720                                            false);
3721
3722                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3723                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3724
3725                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3726                 memcpy(req->hash_key,
3727                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3728
3729                 key_counts -= key_size;
3730                 key_offset++;
3731                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3732                 if (ret) {
3733                         dev_err(&hdev->pdev->dev,
3734                                 "Configure RSS config fail, status = %d\n",
3735                                 ret);
3736                         return ret;
3737                 }
3738         }
3739         return 0;
3740 }
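
/* Sketch of the key programming above (the sizes are assumptions, for
 * illustration only): the RSS key of HCLGE_RSS_KEY_SIZE bytes is written in
 * chunks of at most HCLGE_RSS_HASH_KEY_NUM bytes, one command per chunk, with
 * key_offset telling the firmware which chunk is being written. With a
 * 40-byte key and 16-byte chunks this would take three commands
 * (16 + 16 + 8 bytes).
 */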
3741
3742 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3743 {
3744         struct hclge_rss_indirection_table_cmd *req;
3745         struct hclge_desc desc;
3746         int i, j;
3747         int ret;
3748
3749         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3750
3751         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3752                 hclge_cmd_setup_basic_desc
3753                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3754
3755                 req->start_table_index =
3756                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3757                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3758
3759                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3760                         req->rss_result[j] =
3761                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3762
3763                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3764                 if (ret) {
3765                         dev_err(&hdev->pdev->dev,
3766                                 "Configure rss indir table fail,status = %d\n",
3767                                 ret);
3768                         return ret;
3769                 }
3770         }
3771         return 0;
3772 }
3773
3774 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3775                                  u16 *tc_size, u16 *tc_offset)
3776 {
3777         struct hclge_rss_tc_mode_cmd *req;
3778         struct hclge_desc desc;
3779         int ret;
3780         int i;
3781
3782         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3783         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3784
3785         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3786                 u16 mode = 0;
3787
3788                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3789                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3790                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3791                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3792                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3793
3794                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3795         }
3796
3797         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3798         if (ret)
3799                 dev_err(&hdev->pdev->dev,
3800                         "Configure rss tc mode fail, status = %d\n", ret);
3801
3802         return ret;
3803 }
3804
3805 static void hclge_get_rss_type(struct hclge_vport *vport)
3806 {
3807         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3808             vport->rss_tuple_sets.ipv4_udp_en ||
3809             vport->rss_tuple_sets.ipv4_sctp_en ||
3810             vport->rss_tuple_sets.ipv6_tcp_en ||
3811             vport->rss_tuple_sets.ipv6_udp_en ||
3812             vport->rss_tuple_sets.ipv6_sctp_en)
3813                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3814         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3815                  vport->rss_tuple_sets.ipv6_fragment_en)
3816                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3817         else
3818                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3819 }
3820
3821 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3822 {
3823         struct hclge_rss_input_tuple_cmd *req;
3824         struct hclge_desc desc;
3825         int ret;
3826
3827         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3828
3829         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3830
3831         /* Get the tuple cfg from pf */
3832         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3833         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3834         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3835         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3836         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3837         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3838         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3839         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3840         hclge_get_rss_type(&hdev->vport[0]);
3841         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3842         if (ret)
3843                 dev_err(&hdev->pdev->dev,
3844                         "Configure rss input fail, status = %d\n", ret);
3845         return ret;
3846 }
3847
3848 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3849                          u8 *key, u8 *hfunc)
3850 {
3851         struct hclge_vport *vport = hclge_get_vport(handle);
3852         int i;
3853
3854         /* Get hash algorithm */
3855         if (hfunc) {
3856                 switch (vport->rss_algo) {
3857                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3858                         *hfunc = ETH_RSS_HASH_TOP;
3859                         break;
3860                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3861                         *hfunc = ETH_RSS_HASH_XOR;
3862                         break;
3863                 default:
3864                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3865                         break;
3866                 }
3867         }
3868
3869         /* Get the RSS Key required by the user */
3870         if (key)
3871                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3872
3873         /* Get indirect table */
3874         if (indir)
3875                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3876                         indir[i] =  vport->rss_indirection_tbl[i];
3877
3878         return 0;
3879 }
3880
3881 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3882                          const  u8 *key, const  u8 hfunc)
3883 {
3884         struct hclge_vport *vport = hclge_get_vport(handle);
3885         struct hclge_dev *hdev = vport->back;
3886         u8 hash_algo;
3887         int ret, i;
3888
3889         /* Set the RSS Hash Key if specified by the user */
3890         if (key) {
3891                 switch (hfunc) {
3892                 case ETH_RSS_HASH_TOP:
3893                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3894                         break;
3895                 case ETH_RSS_HASH_XOR:
3896                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3897                         break;
3898                 case ETH_RSS_HASH_NO_CHANGE:
3899                         hash_algo = vport->rss_algo;
3900                         break;
3901                 default:
3902                         return -EINVAL;
3903                 }
3904
3905                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3906                 if (ret)
3907                         return ret;
3908
3909                 /* Update the shadow RSS key with the user-specified key */
3910                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3911                 vport->rss_algo = hash_algo;
3912         }
3913
3914         /* Update the shadow RSS table with user specified qids */
3915         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3916                 vport->rss_indirection_tbl[i] = indir[i];
3917
3918         /* Update the hardware */
3919         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3920 }
3921
3922 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3923 {
3924         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3925
3926         if (nfc->data & RXH_L4_B_2_3)
3927                 hash_sets |= HCLGE_D_PORT_BIT;
3928         else
3929                 hash_sets &= ~HCLGE_D_PORT_BIT;
3930
3931         if (nfc->data & RXH_IP_SRC)
3932                 hash_sets |= HCLGE_S_IP_BIT;
3933         else
3934                 hash_sets &= ~HCLGE_S_IP_BIT;
3935
3936         if (nfc->data & RXH_IP_DST)
3937                 hash_sets |= HCLGE_D_IP_BIT;
3938         else
3939                 hash_sets &= ~HCLGE_D_IP_BIT;
3940
3941         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3942                 hash_sets |= HCLGE_V_TAG_BIT;
3943
3944         return hash_sets;
3945 }
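
/* Example of the bit mapping above: an ethtool request with
 * nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 yields
 * hash_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT, and SCTP flow types additionally get HCLGE_V_TAG_BIT.
 */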
3946
3947 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3948                                struct ethtool_rxnfc *nfc)
3949 {
3950         struct hclge_vport *vport = hclge_get_vport(handle);
3951         struct hclge_dev *hdev = vport->back;
3952         struct hclge_rss_input_tuple_cmd *req;
3953         struct hclge_desc desc;
3954         u8 tuple_sets;
3955         int ret;
3956
3957         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3958                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3959                 return -EINVAL;
3960
3961         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3962         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3963
3964         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3965         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3966         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3967         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3968         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3969         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3970         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3971         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3972
3973         tuple_sets = hclge_get_rss_hash_bits(nfc);
3974         switch (nfc->flow_type) {
3975         case TCP_V4_FLOW:
3976                 req->ipv4_tcp_en = tuple_sets;
3977                 break;
3978         case TCP_V6_FLOW:
3979                 req->ipv6_tcp_en = tuple_sets;
3980                 break;
3981         case UDP_V4_FLOW:
3982                 req->ipv4_udp_en = tuple_sets;
3983                 break;
3984         case UDP_V6_FLOW:
3985                 req->ipv6_udp_en = tuple_sets;
3986                 break;
3987         case SCTP_V4_FLOW:
3988                 req->ipv4_sctp_en = tuple_sets;
3989                 break;
3990         case SCTP_V6_FLOW:
3991                 if ((nfc->data & RXH_L4_B_0_1) ||
3992                     (nfc->data & RXH_L4_B_2_3))
3993                         return -EINVAL;
3994
3995                 req->ipv6_sctp_en = tuple_sets;
3996                 break;
3997         case IPV4_FLOW:
3998                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3999                 break;
4000         case IPV6_FLOW:
4001                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4002                 break;
4003         default:
4004                 return -EINVAL;
4005         }
4006
4007         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4008         if (ret) {
4009                 dev_err(&hdev->pdev->dev,
4010                         "Set rss tuple fail, status = %d\n", ret);
4011                 return ret;
4012         }
4013
4014         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4015         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4016         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4017         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4018         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4019         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4020         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4021         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4022         hclge_get_rss_type(vport);
4023         return 0;
4024 }
4025
4026 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4027                                struct ethtool_rxnfc *nfc)
4028 {
4029         struct hclge_vport *vport = hclge_get_vport(handle);
4030         u8 tuple_sets;
4031
4032         nfc->data = 0;
4033
4034         switch (nfc->flow_type) {
4035         case TCP_V4_FLOW:
4036                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4037                 break;
4038         case UDP_V4_FLOW:
4039                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4040                 break;
4041         case TCP_V6_FLOW:
4042                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4043                 break;
4044         case UDP_V6_FLOW:
4045                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4046                 break;
4047         case SCTP_V4_FLOW:
4048                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4049                 break;
4050         case SCTP_V6_FLOW:
4051                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4052                 break;
4053         case IPV4_FLOW:
4054         case IPV6_FLOW:
4055                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4056                 break;
4057         default:
4058                 return -EINVAL;
4059         }
4060
4061         if (!tuple_sets)
4062                 return 0;
4063
4064         if (tuple_sets & HCLGE_D_PORT_BIT)
4065                 nfc->data |= RXH_L4_B_2_3;
4066         if (tuple_sets & HCLGE_S_PORT_BIT)
4067                 nfc->data |= RXH_L4_B_0_1;
4068         if (tuple_sets & HCLGE_D_IP_BIT)
4069                 nfc->data |= RXH_IP_DST;
4070         if (tuple_sets & HCLGE_S_IP_BIT)
4071                 nfc->data |= RXH_IP_SRC;
4072
4073         return 0;
4074 }
4075
4076 static int hclge_get_tc_size(struct hnae3_handle *handle)
4077 {
4078         struct hclge_vport *vport = hclge_get_vport(handle);
4079         struct hclge_dev *hdev = vport->back;
4080
4081         return hdev->rss_size_max;
4082 }
4083
4084 int hclge_rss_init_hw(struct hclge_dev *hdev)
4085 {
4086         struct hclge_vport *vport = hdev->vport;
4087         u8 *rss_indir = vport[0].rss_indirection_tbl;
4088         u16 rss_size = vport[0].alloc_rss_size;
4089         u8 *key = vport[0].rss_hash_key;
4090         u8 hfunc = vport[0].rss_algo;
4091         u16 tc_offset[HCLGE_MAX_TC_NUM];
4092         u16 tc_valid[HCLGE_MAX_TC_NUM];
4093         u16 tc_size[HCLGE_MAX_TC_NUM];
4094         u16 roundup_size;
4095         unsigned int i;
4096         int ret;
4097
4098         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4099         if (ret)
4100                 return ret;
4101
4102         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4103         if (ret)
4104                 return ret;
4105
4106         ret = hclge_set_rss_input_tuple(hdev);
4107         if (ret)
4108                 return ret;
4109
4110         /* Each TC has the same queue size, and the tc_size set to hardware is
4111          * the log2 of the roundup power of two of rss_size; the actual queue
4112          * size is limited by the indirection table.
4113          */
4114         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4115                 dev_err(&hdev->pdev->dev,
4116                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4117                         rss_size);
4118                 return -EINVAL;
4119         }
4120
4121         roundup_size = roundup_pow_of_two(rss_size);
4122         roundup_size = ilog2(roundup_size);
4123
4124         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4125                 tc_valid[i] = 0;
4126
4127                 if (!(hdev->hw_tc_map & BIT(i)))
4128                         continue;
4129
4130                 tc_valid[i] = 1;
4131                 tc_size[i] = roundup_size;
4132                 tc_offset[i] = rss_size * i;
4133         }
4134
4135         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4136 }
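
/* Worked example of the TC sizing above: with rss_size = 10,
 * roundup_pow_of_two(10) = 16 and ilog2(16) = 4, so tc_size[i] = 4 is written
 * to hardware for every enabled TC, while tc_offset[i] = 10 * i keeps the
 * per-TC queue ranges contiguous.
 */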
4137
4138 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4139 {
4140         struct hclge_vport *vport = hdev->vport;
4141         int i, j;
4142
4143         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4144                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4145                         vport[j].rss_indirection_tbl[i] =
4146                                 i % vport[j].alloc_rss_size;
4147         }
4148 }
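
/* Example of the default indirection pattern above: with alloc_rss_size = 4,
 * each vport's table becomes 0, 1, 2, 3, 0, 1, 2, 3, ... for all
 * HCLGE_RSS_IND_TBL_SIZE entries, spreading flows evenly across the allocated
 * RSS queues.
 */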
4149
4150 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4151 {
4152         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4153         struct hclge_vport *vport = hdev->vport;
4154
4155         if (hdev->pdev->revision >= 0x21)
4156                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4157
4158         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4159                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4160                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4161                 vport[i].rss_tuple_sets.ipv4_udp_en =
4162                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4163                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4164                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4165                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4166                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4167                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4168                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4169                 vport[i].rss_tuple_sets.ipv6_udp_en =
4170                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4171                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4172                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4173                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4174                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4175
4176                 vport[i].rss_algo = rss_algo;
4177
4178                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4179                        HCLGE_RSS_KEY_SIZE);
4180         }
4181
4182         hclge_rss_indir_init_cfg(hdev);
4183 }
4184
4185 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4186                                 int vector_id, bool en,
4187                                 struct hnae3_ring_chain_node *ring_chain)
4188 {
4189         struct hclge_dev *hdev = vport->back;
4190         struct hnae3_ring_chain_node *node;
4191         struct hclge_desc desc;
4192         struct hclge_ctrl_vector_chain_cmd *req
4193                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4194         enum hclge_cmd_status status;
4195         enum hclge_opcode_type op;
4196         u16 tqp_type_and_id;
4197         int i;
4198
4199         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4200         hclge_cmd_setup_basic_desc(&desc, op, false);
4201         req->int_vector_id = vector_id;
4202
4203         i = 0;
4204         for (node = ring_chain; node; node = node->next) {
4205                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4206                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4207                                 HCLGE_INT_TYPE_S,
4208                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4209                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4210                                 HCLGE_TQP_ID_S, node->tqp_index);
4211                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4212                                 HCLGE_INT_GL_IDX_S,
4213                                 hnae3_get_field(node->int_gl_idx,
4214                                                 HNAE3_RING_GL_IDX_M,
4215                                                 HNAE3_RING_GL_IDX_S));
4216                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4217                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4218                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4219                         req->vfid = vport->vport_id;
4220
4221                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4222                         if (status) {
4223                                 dev_err(&hdev->pdev->dev,
4224                                         "Map TQP fail, status is %d.\n",
4225                                         status);
4226                                 return -EIO;
4227                         }
4228                         i = 0;
4229
4230                         hclge_cmd_setup_basic_desc(&desc,
4231                                                    op,
4232                                                    false);
4233                         req->int_vector_id = vector_id;
4234                 }
4235         }
4236
4237         if (i > 0) {
4238                 req->int_cause_num = i;
4239                 req->vfid = vport->vport_id;
4240                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4241                 if (status) {
4242                         dev_err(&hdev->pdev->dev,
4243                                 "Map TQP fail, status is %d.\n", status);
4244                         return -EIO;
4245                 }
4246         }
4247
4248         return 0;
4249 }
4250
4251 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4252                                     struct hnae3_ring_chain_node *ring_chain)
4253 {
4254         struct hclge_vport *vport = hclge_get_vport(handle);
4255         struct hclge_dev *hdev = vport->back;
4256         int vector_id;
4257
4258         vector_id = hclge_get_vector_index(hdev, vector);
4259         if (vector_id < 0) {
4260                 dev_err(&hdev->pdev->dev,
4261                         "Get vector index fail. vector_id =%d\n", vector_id);
4262                 return vector_id;
4263         }
4264
4265         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4266 }
4267
4268 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4269                                        struct hnae3_ring_chain_node *ring_chain)
4270 {
4271         struct hclge_vport *vport = hclge_get_vport(handle);
4272         struct hclge_dev *hdev = vport->back;
4273         int vector_id, ret;
4274
4275         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4276                 return 0;
4277
4278         vector_id = hclge_get_vector_index(hdev, vector);
4279         if (vector_id < 0) {
4280                 dev_err(&handle->pdev->dev,
4281                         "Get vector index fail. ret =%d\n", vector_id);
4282                 return vector_id;
4283         }
4284
4285         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4286         if (ret)
4287                 dev_err(&handle->pdev->dev,
4288                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4289                         vector_id, ret);
4290
4291         return ret;
4292 }
4293
4294 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4295                                struct hclge_promisc_param *param)
4296 {
4297         struct hclge_promisc_cfg_cmd *req;
4298         struct hclge_desc desc;
4299         int ret;
4300
4301         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4302
4303         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4304         req->vf_id = param->vf_id;
4305
4306         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4307          * pdev revision 0x20; newer revisions support them. Setting these two
4308          * fields does not cause the firmware to return an error on
4309          * revision 0x20, so they can be set unconditionally.
4310          */
4311         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4312                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4313
4314         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4315         if (ret)
4316                 dev_err(&hdev->pdev->dev,
4317                         "Set promisc mode fail, status is %d.\n", ret);
4318
4319         return ret;
4320 }
4321
4322 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4323                               bool en_mc, bool en_bc, int vport_id)
4324 {
4325         if (!param)
4326                 return;
4327
4328         memset(param, 0, sizeof(struct hclge_promisc_param));
4329         if (en_uc)
4330                 param->enable = HCLGE_PROMISC_EN_UC;
4331         if (en_mc)
4332                 param->enable |= HCLGE_PROMISC_EN_MC;
4333         if (en_bc)
4334                 param->enable |= HCLGE_PROMISC_EN_BC;
4335         param->vf_id = vport_id;
4336 }
4337
4338 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4339                                   bool en_mc_pmc)
4340 {
4341         struct hclge_vport *vport = hclge_get_vport(handle);
4342         struct hclge_dev *hdev = vport->back;
4343         struct hclge_promisc_param param;
4344         bool en_bc_pmc = true;
4345
4346         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4347          * is always bypassed. So broadcast promisc should stay disabled until
4348          * the user enables promisc mode.
4349          */
4350         if (handle->pdev->revision == 0x20)
4351                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4352
4353         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4354                                  vport->vport_id);
4355         return hclge_cmd_set_promisc_mode(hdev, &param);
4356 }
4357
4358 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4359 {
4360         struct hclge_get_fd_mode_cmd *req;
4361         struct hclge_desc desc;
4362         int ret;
4363
4364         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4365
4366         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4367
4368         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4369         if (ret) {
4370                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4371                 return ret;
4372         }
4373
4374         *fd_mode = req->mode;
4375
4376         return ret;
4377 }
4378
4379 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4380                                    u32 *stage1_entry_num,
4381                                    u32 *stage2_entry_num,
4382                                    u16 *stage1_counter_num,
4383                                    u16 *stage2_counter_num)
4384 {
4385         struct hclge_get_fd_allocation_cmd *req;
4386         struct hclge_desc desc;
4387         int ret;
4388
4389         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4390
4391         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4392
4393         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4394         if (ret) {
4395                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4396                         ret);
4397                 return ret;
4398         }
4399
4400         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4401         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4402         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4403         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4404
4405         return ret;
4406 }
4407
4408 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4409 {
4410         struct hclge_set_fd_key_config_cmd *req;
4411         struct hclge_fd_key_cfg *stage;
4412         struct hclge_desc desc;
4413         int ret;
4414
4415         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4416
4417         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4418         stage = &hdev->fd_cfg.key_cfg[stage_num];
4419         req->stage = stage_num;
4420         req->key_select = stage->key_sel;
4421         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4422         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4423         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4424         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4425         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4426         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4427
4428         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4429         if (ret)
4430                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4431
4432         return ret;
4433 }
4434
4435 static int hclge_init_fd_config(struct hclge_dev *hdev)
4436 {
4437 #define LOW_2_WORDS             0x03
4438         struct hclge_fd_key_cfg *key_cfg;
4439         int ret;
4440
4441         if (!hnae3_dev_fd_supported(hdev))
4442                 return 0;
4443
4444         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4445         if (ret)
4446                 return ret;
4447
4448         switch (hdev->fd_cfg.fd_mode) {
4449         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4450                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4451                 break;
4452         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4453                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4454                 break;
4455         default:
4456                 dev_err(&hdev->pdev->dev,
4457                         "Unsupported flow director mode %d\n",
4458                         hdev->fd_cfg.fd_mode);
4459                 return -EOPNOTSUPP;
4460         }
4461
4462         hdev->fd_cfg.proto_support =
4463                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4464                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4465         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4466         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4467         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4468         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4469         key_cfg->outer_sipv6_word_en = 0;
4470         key_cfg->outer_dipv6_word_en = 0;
4471
4472         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4473                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4474                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4475                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4476
4477         /* When using the max 400bit key, tuples for ether type are also supported */
4478         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4479                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4480                 key_cfg->tuple_active |=
4481                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4482         }
4483
4484         /* roce_type is used to filter roce frames,
4485          * dst_vport is used to specify the target vport of the rule
4486          */
4487         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4488
4489         ret = hclge_get_fd_allocation(hdev,
4490                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4491                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4492                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4493                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4494         if (ret)
4495                 return ret;
4496
4497         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4498 }
4499
4500 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4501                                 int loc, u8 *key, bool is_add)
4502 {
4503         struct hclge_fd_tcam_config_1_cmd *req1;
4504         struct hclge_fd_tcam_config_2_cmd *req2;
4505         struct hclge_fd_tcam_config_3_cmd *req3;
4506         struct hclge_desc desc[3];
4507         int ret;
4508
4509         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4510         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4511         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4512         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4513         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4514
4515         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4516         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4517         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4518
4519         req1->stage = stage;
4520         req1->xy_sel = sel_x ? 1 : 0;
4521         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4522         req1->index = cpu_to_le32(loc);
4523         req1->entry_vld = sel_x ? is_add : 0;
4524
4525         if (key) {
4526                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4527                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4528                        sizeof(req2->tcam_data));
4529                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4530                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4531         }
4532
4533         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4534         if (ret)
4535                 dev_err(&hdev->pdev->dev,
4536                         "config tcam key fail, ret=%d\n",
4537                         ret);
4538
4539         return ret;
4540 }
4541
4542 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4543                               struct hclge_fd_ad_data *action)
4544 {
4545         struct hclge_fd_ad_config_cmd *req;
4546         struct hclge_desc desc;
4547         u64 ad_data = 0;
4548         int ret;
4549
4550         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4551
4552         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4553         req->index = cpu_to_le32(loc);
4554         req->stage = stage;
4555
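        /* The rule id fields are packed first and then shifted into the upper
         * 32 bits of ad_data; the queue/counter action fields below occupy
         * the lower 32 bits.
         */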
4556         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4557                       action->write_rule_id_to_bd);
4558         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4559                         action->rule_id);
4560         ad_data <<= 32;
4561         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4562         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4563                       action->forward_to_direct_queue);
4564         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4565                         action->queue_id);
4566         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4567         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4568                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4569         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4570         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4571                         action->counter_id);
4572
4573         req->ad_data = cpu_to_le64(ad_data);
4574         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4575         if (ret)
4576                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4577
4578         return ret;
4579 }
4580
4581 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4582                                    struct hclge_fd_rule *rule)
4583 {
4584         u16 tmp_x_s, tmp_y_s;
4585         u32 tmp_x_l, tmp_y_l;
4586         int i;
4587
4588         if (rule->unused_tuple & tuple_bit)
4589                 return true;
4590
4591         switch (tuple_bit) {
4592         case 0:
4593                 return false;
4594         case BIT(INNER_DST_MAC):
4595                 for (i = 0; i < ETH_ALEN; i++) {
4596                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4597                                rule->tuples_mask.dst_mac[i]);
4598                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4599                                rule->tuples_mask.dst_mac[i]);
4600                 }
4601
4602                 return true;
4603         case BIT(INNER_SRC_MAC):
4604                 for (i = 0; i < ETH_ALEN; i++) {
4605                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4606                                rule->tuples_mask.src_mac[i]);
4607                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4608                                rule->tuples_mask.src_mac[i]);
4609                 }
4610
4611                 return true;
4612         case BIT(INNER_VLAN_TAG_FST):
4613                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4614                        rule->tuples_mask.vlan_tag1);
4615                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4616                        rule->tuples_mask.vlan_tag1);
4617                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4618                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4619
4620                 return true;
4621         case BIT(INNER_ETH_TYPE):
4622                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4623                        rule->tuples_mask.ether_proto);
4624                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4625                        rule->tuples_mask.ether_proto);
4626                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4627                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4628
4629                 return true;
4630         case BIT(INNER_IP_TOS):
4631                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4632                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4633
4634                 return true;
4635         case BIT(INNER_IP_PROTO):
4636                 calc_x(*key_x, rule->tuples.ip_proto,
4637                        rule->tuples_mask.ip_proto);
4638                 calc_y(*key_y, rule->tuples.ip_proto,
4639                        rule->tuples_mask.ip_proto);
4640
4641                 return true;
4642         case BIT(INNER_SRC_IP):
4643                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4644                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4645                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4646                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4647                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4648                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4649
4650                 return true;
4651         case BIT(INNER_DST_IP):
4652                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4653                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4654                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4655                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4656                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4657                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4658
4659                 return true;
4660         case BIT(INNER_SRC_PORT):
4661                 calc_x(tmp_x_s, rule->tuples.src_port,
4662                        rule->tuples_mask.src_port);
4663                 calc_y(tmp_y_s, rule->tuples.src_port,
4664                        rule->tuples_mask.src_port);
4665                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4666                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4667
4668                 return true;
4669         case BIT(INNER_DST_PORT):
4670                 calc_x(tmp_x_s, rule->tuples.dst_port,
4671                        rule->tuples_mask.dst_port);
4672                 calc_y(tmp_y_s, rule->tuples.dst_port,
4673                        rule->tuples_mask.dst_port);
4674                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4675                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4676
4677                 return true;
4678         default:
4679                 return false;
4680         }
4681 }
4682
4683 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4684                                  u8 vf_id, u8 network_port_id)
4685 {
4686         u32 port_number = 0;
4687
4688         if (port_type == HOST_PORT) {
4689                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4690                                 pf_id);
4691                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4692                                 vf_id);
4693                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4694         } else {
4695                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4696                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4697                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4698         }
4699
4700         return port_number;
4701 }
4702
4703 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4704                                        __le32 *key_x, __le32 *key_y,
4705                                        struct hclge_fd_rule *rule)
4706 {
4707         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4708         u8 cur_pos = 0, tuple_size, shift_bits;
4709         unsigned int i;
4710
4711         for (i = 0; i < MAX_META_DATA; i++) {
4712                 tuple_size = meta_data_key_info[i].key_length;
4713                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4714
4715                 switch (tuple_bit) {
4716                 case BIT(ROCE_TYPE):
4717                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4718                         cur_pos += tuple_size;
4719                         break;
4720                 case BIT(DST_VPORT):
4721                         port_number = hclge_get_port_number(HOST_PORT, 0,
4722                                                             rule->vf_id, 0);
4723                         hnae3_set_field(meta_data,
4724                                         GENMASK(cur_pos + tuple_size, cur_pos),
4725                                         cur_pos, port_number);
4726                         cur_pos += tuple_size;
4727                         break;
4728                 default:
4729                         break;
4730                 }
4731         }
4732
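        /* Left-align the assembled meta data: shift out the unused low-order
         * bits so that the active fields end up in the most significant bits
         * of the 32-bit meta data region.
         */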
4733         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4734         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4735         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4736
4737         *key_x = cpu_to_le32(tmp_x << shift_bits);
4738         *key_y = cpu_to_le32(tmp_y << shift_bits);
4739 }
4740
4741 /* A complete key consists of a meta data key and a tuple key.
4742  * The meta data key is stored at the MSB region, the tuple key is stored at
4743  * the LSB region, and unused bits are filled with 0.
4744  */
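/* Illustrative layout (assuming the 400-bit key mode; widths not to scale):
 *
 *   MSB                                                      LSB
 *   +---------------+----------------------+-----------------+
 *   | meta data key | zero padding         | tuple key       |
 *   +---------------+----------------------+-----------------+
 */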
4745 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4746                             struct hclge_fd_rule *rule)
4747 {
4748         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4749         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4750         u8 *cur_key_x, *cur_key_y;
4751         unsigned int i;
4752         int ret, tuple_size;
4753         u8 meta_data_region;
4754
4755         memset(key_x, 0, sizeof(key_x));
4756         memset(key_y, 0, sizeof(key_y));
4757         cur_key_x = key_x;
4758         cur_key_y = key_y;
4759
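        /* Convert each tuple in turn; cur_key_x/cur_key_y advance past every
         * tuple that is active in the key layout, whether or not this rule
         * actually uses it (unused tuples are left as zeros).
         */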
4760         for (i = 0; i < MAX_TUPLE; i++) {
4761                 bool tuple_valid;
4762                 u32 check_tuple;
4763
4764                 tuple_size = tuple_key_info[i].key_length / 8;
4765                 check_tuple = key_cfg->tuple_active & BIT(i);
4766
4767                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4768                                                      cur_key_y, rule);
4769                 if (tuple_valid) {
4770                         cur_key_x += tuple_size;
4771                         cur_key_y += tuple_size;
4772                 }
4773         }
4774
4775         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4776                         MAX_META_DATA_LENGTH / 8;
4777
4778         hclge_fd_convert_meta_data(key_cfg,
4779                                    (__le32 *)(key_x + meta_data_region),
4780                                    (__le32 *)(key_y + meta_data_region),
4781                                    rule);
4782
4783         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4784                                    true);
4785         if (ret) {
4786                 dev_err(&hdev->pdev->dev,
4787                         "fd key_y config fail, loc=%d, ret=%d\n",
4788                         rule->location, ret);
4789                 return ret;
4790         }
4791
4792         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4793                                    true);
4794         if (ret)
4795                 dev_err(&hdev->pdev->dev,
4796                         "fd key_x config fail, loc=%d, ret=%d\n",
4797                         rule->location, ret);
4798         return ret;
4799 }
4800
4801 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4802                                struct hclge_fd_rule *rule)
4803 {
4804         struct hclge_fd_ad_data ad_data;
4805
4806         ad_data.ad_id = rule->location;
4807
4808         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4809                 ad_data.drop_packet = true;
4810                 ad_data.forward_to_direct_queue = false;
4811                 ad_data.queue_id = 0;
4812         } else {
4813                 ad_data.drop_packet = false;
4814                 ad_data.forward_to_direct_queue = true;
4815                 ad_data.queue_id = rule->queue_id;
4816         }
4817
4818         ad_data.use_counter = false;
4819         ad_data.counter_id = 0;
4820
4821         ad_data.use_next_stage = false;
4822         ad_data.next_input_key = 0;
4823
4824         ad_data.write_rule_id_to_bd = true;
4825         ad_data.rule_id = rule->location;
4826
4827         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4828 }
4829
4830 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4831                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4832 {
4833         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4834         struct ethtool_usrip4_spec *usr_ip4_spec;
4835         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4836         struct ethtool_usrip6_spec *usr_ip6_spec;
4837         struct ethhdr *ether_spec;
4838
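        /* Validate the ethtool flow spec and build up @unused: a bit is set
         * for every tuple field that is either not applicable to this flow
         * type or left as a wildcard (zero) by the user.
         */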
4839         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4840                 return -EINVAL;
4841
4842         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4843                 return -EOPNOTSUPP;
4844
4845         if ((fs->flow_type & FLOW_EXT) &&
4846             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4847                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4848                 return -EOPNOTSUPP;
4849         }
4850
4851         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4852         case SCTP_V4_FLOW:
4853         case TCP_V4_FLOW:
4854         case UDP_V4_FLOW:
4855                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4856                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4857
4858                 if (!tcp_ip4_spec->ip4src)
4859                         *unused |= BIT(INNER_SRC_IP);
4860
4861                 if (!tcp_ip4_spec->ip4dst)
4862                         *unused |= BIT(INNER_DST_IP);
4863
4864                 if (!tcp_ip4_spec->psrc)
4865                         *unused |= BIT(INNER_SRC_PORT);
4866
4867                 if (!tcp_ip4_spec->pdst)
4868                         *unused |= BIT(INNER_DST_PORT);
4869
4870                 if (!tcp_ip4_spec->tos)
4871                         *unused |= BIT(INNER_IP_TOS);
4872
4873                 break;
4874         case IP_USER_FLOW:
4875                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4876                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4877                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4878
4879                 if (!usr_ip4_spec->ip4src)
4880                         *unused |= BIT(INNER_SRC_IP);
4881
4882                 if (!usr_ip4_spec->ip4dst)
4883                         *unused |= BIT(INNER_DST_IP);
4884
4885                 if (!usr_ip4_spec->tos)
4886                         *unused |= BIT(INNER_IP_TOS);
4887
4888                 if (!usr_ip4_spec->proto)
4889                         *unused |= BIT(INNER_IP_PROTO);
4890
4891                 if (usr_ip4_spec->l4_4_bytes)
4892                         return -EOPNOTSUPP;
4893
4894                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4895                         return -EOPNOTSUPP;
4896
4897                 break;
4898         case SCTP_V6_FLOW:
4899         case TCP_V6_FLOW:
4900         case UDP_V6_FLOW:
4901                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4902                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4903                         BIT(INNER_IP_TOS);
4904
4905                 /* check whether src/dst ip address is used */
4906                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4907                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4908                         *unused |= BIT(INNER_SRC_IP);
4909
4910                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4911                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4912                         *unused |= BIT(INNER_DST_IP);
4913
4914                 if (!tcp_ip6_spec->psrc)
4915                         *unused |= BIT(INNER_SRC_PORT);
4916
4917                 if (!tcp_ip6_spec->pdst)
4918                         *unused |= BIT(INNER_DST_PORT);
4919
4920                 if (tcp_ip6_spec->tclass)
4921                         return -EOPNOTSUPP;
4922
4923                 break;
4924         case IPV6_USER_FLOW:
4925                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4926                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4927                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4928                         BIT(INNER_DST_PORT);
4929
4930                 /* check whether src/dst ip address is used */
4931                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4932                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4933                         *unused |= BIT(INNER_SRC_IP);
4934
4935                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4936                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4937                         *unused |= BIT(INNER_DST_IP);
4938
4939                 if (!usr_ip6_spec->l4_proto)
4940                         *unused |= BIT(INNER_IP_PROTO);
4941
4942                 if (usr_ip6_spec->tclass)
4943                         return -EOPNOTSUPP;
4944
4945                 if (usr_ip6_spec->l4_4_bytes)
4946                         return -EOPNOTSUPP;
4947
4948                 break;
4949         case ETHER_FLOW:
4950                 ether_spec = &fs->h_u.ether_spec;
4951                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4952                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4953                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4954
4955                 if (is_zero_ether_addr(ether_spec->h_source))
4956                         *unused |= BIT(INNER_SRC_MAC);
4957
4958                 if (is_zero_ether_addr(ether_spec->h_dest))
4959                         *unused |= BIT(INNER_DST_MAC);
4960
4961                 if (!ether_spec->h_proto)
4962                         *unused |= BIT(INNER_ETH_TYPE);
4963
4964                 break;
4965         default:
4966                 return -EOPNOTSUPP;
4967         }
4968
4969         if ((fs->flow_type & FLOW_EXT)) {
4970                 if (fs->h_ext.vlan_etype)
4971                         return -EOPNOTSUPP;
4972                 if (!fs->h_ext.vlan_tci)
4973                         *unused |= BIT(INNER_VLAN_TAG_FST);
4974
4975                 if (fs->m_ext.vlan_tci) {
4976                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4977                                 return -EINVAL;
4978                 }
4979         } else {
4980                 *unused |= BIT(INNER_VLAN_TAG_FST);
4981         }
4982
4983         if (fs->flow_type & FLOW_MAC_EXT) {
4984                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4985                         return -EOPNOTSUPP;
4986
4987                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4988                         *unused |= BIT(INNER_DST_MAC);
4989                 else
4990                         *unused &= ~(BIT(INNER_DST_MAC));
4991         }
4992
4993         return 0;
4994 }
4995
4996 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4997 {
4998         struct hclge_fd_rule *rule = NULL;
4999         struct hlist_node *node2;
5000
5001         spin_lock_bh(&hdev->fd_rule_lock);
5002         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5003                 if (rule->location >= location)
5004                         break;
5005         }
5006
5007         spin_unlock_bh(&hdev->fd_rule_lock);
5008
5009         return rule && rule->location == location;
5010 }
5011
5012 /* make sure this is called with fd_rule_lock held */
5013 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5014                                      struct hclge_fd_rule *new_rule,
5015                                      u16 location,
5016                                      bool is_add)
5017 {
5018         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5019         struct hlist_node *node2;
5020
5021         if (is_add && !new_rule)
5022                 return -EINVAL;
5023
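        /* The rule list is kept sorted by location: walk it to find either an
         * existing rule at @location (to replace or delete) or the node after
         * which a new rule should be inserted.
         */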
5024         hlist_for_each_entry_safe(rule, node2,
5025                                   &hdev->fd_rule_list, rule_node) {
5026                 if (rule->location >= location)
5027                         break;
5028                 parent = rule;
5029         }
5030
5031         if (rule && rule->location == location) {
5032                 hlist_del(&rule->rule_node);
5033                 kfree(rule);
5034                 hdev->hclge_fd_rule_num--;
5035
5036                 if (!is_add) {
5037                         if (!hdev->hclge_fd_rule_num)
5038                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5039                         clear_bit(location, hdev->fd_bmap);
5040
5041                         return 0;
5042                 }
5043         } else if (!is_add) {
5044                 dev_err(&hdev->pdev->dev,
5045                         "delete fail, rule %d does not exist\n",
5046                         location);
5047                 return -EINVAL;
5048         }
5049
5050         INIT_HLIST_NODE(&new_rule->rule_node);
5051
5052         if (parent)
5053                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5054         else
5055                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5056
5057         set_bit(location, hdev->fd_bmap);
5058         hdev->hclge_fd_rule_num++;
5059         hdev->fd_active_type = new_rule->rule_type;
5060
5061         return 0;
5062 }
5063
5064 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5065                               struct ethtool_rx_flow_spec *fs,
5066                               struct hclge_fd_rule *rule)
5067 {
5068         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5069
5070         switch (flow_type) {
5071         case SCTP_V4_FLOW:
5072         case TCP_V4_FLOW:
5073         case UDP_V4_FLOW:
5074                 rule->tuples.src_ip[IPV4_INDEX] =
5075                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5076                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5077                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5078
5079                 rule->tuples.dst_ip[IPV4_INDEX] =
5080                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5081                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5082                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5083
5084                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5085                 rule->tuples_mask.src_port =
5086                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5087
5088                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5089                 rule->tuples_mask.dst_port =
5090                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5091
5092                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5093                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5094
5095                 rule->tuples.ether_proto = ETH_P_IP;
5096                 rule->tuples_mask.ether_proto = 0xFFFF;
5097
5098                 break;
5099         case IP_USER_FLOW:
5100                 rule->tuples.src_ip[IPV4_INDEX] =
5101                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5102                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5103                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5104
5105                 rule->tuples.dst_ip[IPV4_INDEX] =
5106                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5107                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5108                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5109
5110                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5111                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5112
5113                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5114                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5115
5116                 rule->tuples.ether_proto = ETH_P_IP;
5117                 rule->tuples_mask.ether_proto = 0xFFFF;
5118
5119                 break;
5120         case SCTP_V6_FLOW:
5121         case TCP_V6_FLOW:
5122         case UDP_V6_FLOW:
5123                 be32_to_cpu_array(rule->tuples.src_ip,
5124                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5125                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5126                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5127
5128                 be32_to_cpu_array(rule->tuples.dst_ip,
5129                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5130                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5131                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5132
5133                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5134                 rule->tuples_mask.src_port =
5135                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5136
5137                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5138                 rule->tuples_mask.dst_port =
5139                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5140
5141                 rule->tuples.ether_proto = ETH_P_IPV6;
5142                 rule->tuples_mask.ether_proto = 0xFFFF;
5143
5144                 break;
5145         case IPV6_USER_FLOW:
5146                 be32_to_cpu_array(rule->tuples.src_ip,
5147                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5148                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5149                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5150
5151                 be32_to_cpu_array(rule->tuples.dst_ip,
5152                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5153                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5154                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5155
5156                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5157                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5158
5159                 rule->tuples.ether_proto = ETH_P_IPV6;
5160                 rule->tuples_mask.ether_proto = 0xFFFF;
5161
5162                 break;
5163         case ETHER_FLOW:
5164                 ether_addr_copy(rule->tuples.src_mac,
5165                                 fs->h_u.ether_spec.h_source);
5166                 ether_addr_copy(rule->tuples_mask.src_mac,
5167                                 fs->m_u.ether_spec.h_source);
5168
5169                 ether_addr_copy(rule->tuples.dst_mac,
5170                                 fs->h_u.ether_spec.h_dest);
5171                 ether_addr_copy(rule->tuples_mask.dst_mac,
5172                                 fs->m_u.ether_spec.h_dest);
5173
5174                 rule->tuples.ether_proto =
5175                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5176                 rule->tuples_mask.ether_proto =
5177                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5178
5179                 break;
5180         default:
5181                 return -EOPNOTSUPP;
5182         }
5183
5184         switch (flow_type) {
5185         case SCTP_V4_FLOW:
5186         case SCTP_V6_FLOW:
5187                 rule->tuples.ip_proto = IPPROTO_SCTP;
5188                 rule->tuples_mask.ip_proto = 0xFF;
5189                 break;
5190         case TCP_V4_FLOW:
5191         case TCP_V6_FLOW:
5192                 rule->tuples.ip_proto = IPPROTO_TCP;
5193                 rule->tuples_mask.ip_proto = 0xFF;
5194                 break;
5195         case UDP_V4_FLOW:
5196         case UDP_V6_FLOW:
5197                 rule->tuples.ip_proto = IPPROTO_UDP;
5198                 rule->tuples_mask.ip_proto = 0xFF;
5199                 break;
5200         default:
5201                 break;
5202         }
5203
5204         if ((fs->flow_type & FLOW_EXT)) {
5205                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5206                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5207         }
5208
5209         if (fs->flow_type & FLOW_MAC_EXT) {
5210                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5211                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5212         }
5213
5214         return 0;
5215 }
5216
5217 /* make sure this is called with fd_rule_lock held */
5218 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5219                                 struct hclge_fd_rule *rule)
5220 {
5221         int ret;
5222
5223         if (!rule) {
5224                 dev_err(&hdev->pdev->dev,
5225                         "The flow director rule is NULL\n");
5226                 return -EINVAL;
5227         }
5228
5229         /* it will never fail here, so there is no need to check the return value */
5230         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5231
5232         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5233         if (ret)
5234                 goto clear_rule;
5235
5236         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5237         if (ret)
5238                 goto clear_rule;
5239
5240         return 0;
5241
5242 clear_rule:
5243         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5244         return ret;
5245 }
5246
5247 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5248                               struct ethtool_rxnfc *cmd)
5249 {
5250         struct hclge_vport *vport = hclge_get_vport(handle);
5251         struct hclge_dev *hdev = vport->back;
5252         u16 dst_vport_id = 0, q_index = 0;
5253         struct ethtool_rx_flow_spec *fs;
5254         struct hclge_fd_rule *rule;
5255         u32 unused = 0;
5256         u8 action;
5257         int ret;
5258
5259         if (!hnae3_dev_fd_supported(hdev))
5260                 return -EOPNOTSUPP;
5261
5262         if (!hdev->fd_en) {
5263                 dev_warn(&hdev->pdev->dev,
5264                          "Please enable flow director first\n");
5265                 return -EOPNOTSUPP;
5266         }
5267
5268         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5269
5270         ret = hclge_fd_check_spec(hdev, fs, &unused);
5271         if (ret) {
5272                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5273                 return ret;
5274         }
5275
5276         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5277                 action = HCLGE_FD_ACTION_DROP_PACKET;
5278         } else {
5279                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5280                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5281                 u16 tqps;
5282
5283                 if (vf > hdev->num_req_vfs) {
5284                         dev_err(&hdev->pdev->dev,
5285                                 "Error: vf id (%d) > max vf num (%d)\n",
5286                                 vf, hdev->num_req_vfs);
5287                         return -EINVAL;
5288                 }
5289
5290                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5291                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5292
5293                 if (ring >= tqps) {
5294                         dev_err(&hdev->pdev->dev,
5295                                 "Error: queue id (%d) > max tqp num (%d)\n",
5296                                 ring, tqps - 1);
5297                         return -EINVAL;
5298                 }
5299
5300                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5301                 q_index = ring;
5302         }
5303
5304         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5305         if (!rule)
5306                 return -ENOMEM;
5307
5308         ret = hclge_fd_get_tuple(hdev, fs, rule);
5309         if (ret) {
5310                 kfree(rule);
5311                 return ret;
5312         }
5313
5314         rule->flow_type = fs->flow_type;
5315
5316         rule->location = fs->location;
5317         rule->unused_tuple = unused;
5318         rule->vf_id = dst_vport_id;
5319         rule->queue_id = q_index;
5320         rule->action = action;
5321         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5322
5323         /* to avoid rule conflicts, when the user configures a rule via ethtool,
5324          * we need to clear all arfs rules first
5325          */
5326         hclge_clear_arfs_rules(handle);
5327
5328         spin_lock_bh(&hdev->fd_rule_lock);
5329         ret = hclge_fd_config_rule(hdev, rule);
5330
5331         spin_unlock_bh(&hdev->fd_rule_lock);
5332
5333         return ret;
5334 }
5335
5336 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5337                               struct ethtool_rxnfc *cmd)
5338 {
5339         struct hclge_vport *vport = hclge_get_vport(handle);
5340         struct hclge_dev *hdev = vport->back;
5341         struct ethtool_rx_flow_spec *fs;
5342         int ret;
5343
5344         if (!hnae3_dev_fd_supported(hdev))
5345                 return -EOPNOTSUPP;
5346
5347         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5348
5349         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5350                 return -EINVAL;
5351
5352         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5353                 dev_err(&hdev->pdev->dev,
5354                         "Delete fail, rule %d does not exist\n", fs->location);
5355                 return -ENOENT;
5356         }
5357
5358         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5359                                    NULL, false);
5360         if (ret)
5361                 return ret;
5362
5363         spin_lock_bh(&hdev->fd_rule_lock);
5364         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5365
5366         spin_unlock_bh(&hdev->fd_rule_lock);
5367
5368         return ret;
5369 }
5370
5371 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5372                                      bool clear_list)
5373 {
5374         struct hclge_vport *vport = hclge_get_vport(handle);
5375         struct hclge_dev *hdev = vport->back;
5376         struct hclge_fd_rule *rule;
5377         struct hlist_node *node;
5378         u16 location;
5379
5380         if (!hnae3_dev_fd_supported(hdev))
5381                 return;
5382
5383         spin_lock_bh(&hdev->fd_rule_lock);
5384         for_each_set_bit(location, hdev->fd_bmap,
5385                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5386                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5387                                      NULL, false);
5388
5389         if (clear_list) {
5390                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5391                                           rule_node) {
5392                         hlist_del(&rule->rule_node);
5393                         kfree(rule);
5394                 }
5395                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5396                 hdev->hclge_fd_rule_num = 0;
5397                 bitmap_zero(hdev->fd_bmap,
5398                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5399         }
5400
5401         spin_unlock_bh(&hdev->fd_rule_lock);
5402 }
5403
5404 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5405 {
5406         struct hclge_vport *vport = hclge_get_vport(handle);
5407         struct hclge_dev *hdev = vport->back;
5408         struct hclge_fd_rule *rule;
5409         struct hlist_node *node;
5410         int ret;
5411
5412         /* Return ok here, because reset error handling will check this
5413          * return value. If error is returned here, the reset process will
5414          * fail.
5415          */
5416         if (!hnae3_dev_fd_supported(hdev))
5417                 return 0;
5418
5419         /* if fd is disabled, the rules should not be restored during reset */
5420         if (!hdev->fd_en)
5421                 return 0;
5422
5423         spin_lock_bh(&hdev->fd_rule_lock);
5424         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5425                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5426                 if (!ret)
5427                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5428
5429                 if (ret) {
5430                         dev_warn(&hdev->pdev->dev,
5431                                  "Restore rule %d failed, remove it\n",
5432                                  rule->location);
5433                         clear_bit(rule->location, hdev->fd_bmap);
5434                         hlist_del(&rule->rule_node);
5435                         kfree(rule);
5436                         hdev->hclge_fd_rule_num--;
5437                 }
5438         }
5439
5440         if (hdev->hclge_fd_rule_num)
5441                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5442
5443         spin_unlock_bh(&hdev->fd_rule_lock);
5444
5445         return 0;
5446 }
5447
5448 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5449                                  struct ethtool_rxnfc *cmd)
5450 {
5451         struct hclge_vport *vport = hclge_get_vport(handle);
5452         struct hclge_dev *hdev = vport->back;
5453
5454         if (!hnae3_dev_fd_supported(hdev))
5455                 return -EOPNOTSUPP;
5456
5457         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5458         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5459
5460         return 0;
5461 }
5462
5463 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5464                                   struct ethtool_rxnfc *cmd)
5465 {
5466         struct hclge_vport *vport = hclge_get_vport(handle);
5467         struct hclge_fd_rule *rule = NULL;
5468         struct hclge_dev *hdev = vport->back;
5469         struct ethtool_rx_flow_spec *fs;
5470         struct hlist_node *node2;
5471
5472         if (!hnae3_dev_fd_supported(hdev))
5473                 return -EOPNOTSUPP;
5474
5475         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5476
5477         spin_lock_bh(&hdev->fd_rule_lock);
5478
5479         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5480                 if (rule->location >= fs->location)
5481                         break;
5482         }
5483
5484         if (!rule || fs->location != rule->location) {
5485                 spin_unlock_bh(&hdev->fd_rule_lock);
5486
5487                 return -ENOENT;
5488         }
5489
5490         fs->flow_type = rule->flow_type;
5491         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5492         case SCTP_V4_FLOW:
5493         case TCP_V4_FLOW:
5494         case UDP_V4_FLOW:
5495                 fs->h_u.tcp_ip4_spec.ip4src =
5496                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5497                 fs->m_u.tcp_ip4_spec.ip4src =
5498                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5499                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5500
5501                 fs->h_u.tcp_ip4_spec.ip4dst =
5502                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5503                 fs->m_u.tcp_ip4_spec.ip4dst =
5504                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5505                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5506
5507                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5508                 fs->m_u.tcp_ip4_spec.psrc =
5509                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5510                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5511
5512                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5513                 fs->m_u.tcp_ip4_spec.pdst =
5514                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5515                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5516
5517                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5518                 fs->m_u.tcp_ip4_spec.tos =
5519                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5520                                 0 : rule->tuples_mask.ip_tos;
5521
5522                 break;
5523         case IP_USER_FLOW:
5524                 fs->h_u.usr_ip4_spec.ip4src =
5525                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5526                 fs->m_u.tcp_ip4_spec.ip4src =
5527                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5528                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5529
5530                 fs->h_u.usr_ip4_spec.ip4dst =
5531                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5532                 fs->m_u.usr_ip4_spec.ip4dst =
5533                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5534                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5535
5536                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5537                 fs->m_u.usr_ip4_spec.tos =
5538                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5539                                 0 : rule->tuples_mask.ip_tos;
5540
5541                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5542                 fs->m_u.usr_ip4_spec.proto =
5543                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5544                                 0 : rule->tuples_mask.ip_proto;
5545
5546                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5547
5548                 break;
5549         case SCTP_V6_FLOW:
5550         case TCP_V6_FLOW:
5551         case UDP_V6_FLOW:
5552                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5553                                   rule->tuples.src_ip, IPV6_SIZE);
5554                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5555                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5556                                sizeof(int) * IPV6_SIZE);
5557                 else
5558                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5559                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5560
5561                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5562                                   rule->tuples.dst_ip, IPV6_SIZE);
5563                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5564                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5565                                sizeof(int) * IPV6_SIZE);
5566                 else
5567                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5568                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5569
5570                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5571                 fs->m_u.tcp_ip6_spec.psrc =
5572                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5573                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5574
5575                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5576                 fs->m_u.tcp_ip6_spec.pdst =
5577                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5578                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5579
5580                 break;
5581         case IPV6_USER_FLOW:
5582                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5583                                   rule->tuples.src_ip, IPV6_SIZE);
5584                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5585                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5586                                sizeof(int) * IPV6_SIZE);
5587                 else
5588                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5589                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5590
5591                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5592                                   rule->tuples.dst_ip, IPV6_SIZE);
5593                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5594                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5595                                sizeof(int) * IPV6_SIZE);
5596                 else
5597                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5598                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5599
5600                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5601                 fs->m_u.usr_ip6_spec.l4_proto =
5602                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5603                                 0 : rule->tuples_mask.ip_proto;
5604
5605                 break;
5606         case ETHER_FLOW:
5607                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5608                                 rule->tuples.src_mac);
5609                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5610                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5611                 else
5612                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5613                                         rule->tuples_mask.src_mac);
5614
5615                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5616                                 rule->tuples.dst_mac);
5617                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5618                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5619                 else
5620                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5621                                         rule->tuples_mask.dst_mac);
5622
5623                 fs->h_u.ether_spec.h_proto =
5624                                 cpu_to_be16(rule->tuples.ether_proto);
5625                 fs->m_u.ether_spec.h_proto =
5626                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5627                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5628
5629                 break;
5630         default:
5631                 spin_unlock_bh(&hdev->fd_rule_lock);
5632                 return -EOPNOTSUPP;
5633         }
5634
5635         if (fs->flow_type & FLOW_EXT) {
5636                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5637                 fs->m_ext.vlan_tci =
5638                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5639                                 cpu_to_be16(VLAN_VID_MASK) :
5640                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5641         }
5642
5643         if (fs->flow_type & FLOW_MAC_EXT) {
5644                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5645                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5646                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5647                 else
5648                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5649                                         rule->tuples_mask.dst_mac);
5650         }
5651
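        /* Encode the destination in ring_cookie: drop rules report
         * RX_CLS_FLOW_DISC; otherwise the queue id occupies the low bits and
         * the VF id is shifted up by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF.
         */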
5652         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5653                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5654         } else {
5655                 u64 vf_id;
5656
5657                 fs->ring_cookie = rule->queue_id;
5658                 vf_id = rule->vf_id;
5659                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5660                 fs->ring_cookie |= vf_id;
5661         }
5662
5663         spin_unlock_bh(&hdev->fd_rule_lock);
5664
5665         return 0;
5666 }
5667
5668 static int hclge_get_all_rules(struct hnae3_handle *handle,
5669                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5670 {
5671         struct hclge_vport *vport = hclge_get_vport(handle);
5672         struct hclge_dev *hdev = vport->back;
5673         struct hclge_fd_rule *rule;
5674         struct hlist_node *node2;
5675         int cnt = 0;
5676
5677         if (!hnae3_dev_fd_supported(hdev))
5678                 return -EOPNOTSUPP;
5679
5680         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5681
5682         spin_lock_bh(&hdev->fd_rule_lock);
5683         hlist_for_each_entry_safe(rule, node2,
5684                                   &hdev->fd_rule_list, rule_node) {
5685                 if (cnt == cmd->rule_cnt) {
5686                         spin_unlock_bh(&hdev->fd_rule_lock);
5687                         return -EMSGSIZE;
5688                 }
5689
5690                 rule_locs[cnt] = rule->location;
5691                 cnt++;
5692         }
5693
5694         spin_unlock_bh(&hdev->fd_rule_lock);
5695
5696         cmd->rule_cnt = cnt;
5697
5698         return 0;
5699 }
5700
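/* Extract the n-tuple used by aRFS from the dissected flow keys. For IPv4
 * only the last word of the src/dst address arrays is filled; for IPv6 all
 * four words are copied.
 */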
5701 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5702                                      struct hclge_fd_rule_tuples *tuples)
5703 {
5704         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5705         tuples->ip_proto = fkeys->basic.ip_proto;
5706         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5707
5708         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5709                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5710                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5711         } else {
5712                 memcpy(tuples->src_ip,
5713                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5714                        sizeof(tuples->src_ip));
5715                 memcpy(tuples->dst_ip,
5716                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5717                        sizeof(tuples->dst_ip));
5718         }
5719 }
5720
5721 /* traverse all rules, check whether an existing rule has the same tuples */
5722 static struct hclge_fd_rule *
5723 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5724                           const struct hclge_fd_rule_tuples *tuples)
5725 {
5726         struct hclge_fd_rule *rule = NULL;
5727         struct hlist_node *node;
5728
5729         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5730                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5731                         return rule;
5732         }
5733
5734         return NULL;
5735 }
5736
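/* Fill an aRFS rule from the extracted tuples. aRFS rules match only on the
 * L3 addresses, destination port, protocol and ethertype; MAC, VLAN, TOS and
 * source port are marked unused, and the used tuples get a full mask.
 */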
5737 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5738                                      struct hclge_fd_rule *rule)
5739 {
5740         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5741                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5742                              BIT(INNER_SRC_PORT);
5743         rule->action = 0;
5744         rule->vf_id = 0;
5745         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5746         if (tuples->ether_proto == ETH_P_IP) {
5747                 if (tuples->ip_proto == IPPROTO_TCP)
5748                         rule->flow_type = TCP_V4_FLOW;
5749                 else
5750                         rule->flow_type = UDP_V4_FLOW;
5751         } else {
5752                 if (tuples->ip_proto == IPPROTO_TCP)
5753                         rule->flow_type = TCP_V6_FLOW;
5754                 else
5755                         rule->flow_type = UDP_V6_FLOW;
5756         }
5757         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5758         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5759 }
5760
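/* aRFS entry point from the stack: returns the rule location (used by aRFS as
 * the flow id) on success, or a negative errno. aRFS is refused while
 * user-configured (ethtool) rules are active.
 */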
5761 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5762                                       u16 flow_id, struct flow_keys *fkeys)
5763 {
5764         struct hclge_vport *vport = hclge_get_vport(handle);
5765         struct hclge_fd_rule_tuples new_tuples;
5766         struct hclge_dev *hdev = vport->back;
5767         struct hclge_fd_rule *rule;
5768         u16 tmp_queue_id;
5769         u16 bit_id;
5770         int ret;
5771
5772         if (!hnae3_dev_fd_supported(hdev))
5773                 return -EOPNOTSUPP;
5774
5775         memset(&new_tuples, 0, sizeof(new_tuples));
5776         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5777
5778         spin_lock_bh(&hdev->fd_rule_lock);
5779
5780         /* when there is already an fd rule added by the user,
5781          * arfs should not work
5782          */
5783         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5784                 spin_unlock_bh(&hdev->fd_rule_lock);
5785
5786                 return -EOPNOTSUPP;
5787         }
5788
5789         /* check whether a flow director filter already exists for this flow;
5790          * if not, create a new filter for it;
5791          * if a filter exists with a different queue id, modify the filter;
5792          * if a filter exists with the same queue id, do nothing
5793          */
5794         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5795         if (!rule) {
5796                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5797                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5798                         spin_unlock_bh(&hdev->fd_rule_lock);
5799
5800                         return -ENOSPC;
5801                 }
5802
5803                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5804                 if (!rule) {
5805                         spin_unlock_bh(&hdev->fd_rule_lock);
5806
5807                         return -ENOMEM;
5808                 }
5809
5810                 set_bit(bit_id, hdev->fd_bmap);
5811                 rule->location = bit_id;
5812                 rule->flow_id = flow_id;
5813                 rule->queue_id = queue_id;
5814                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5815                 ret = hclge_fd_config_rule(hdev, rule);
5816
5817                 spin_unlock_bh(&hdev->fd_rule_lock);
5818
5819                 if (ret)
5820                         return ret;
5821
5822                 return rule->location;
5823         }
5824
5825         spin_unlock_bh(&hdev->fd_rule_lock);
5826
5827         if (rule->queue_id == queue_id)
5828                 return rule->location;
5829
5830         tmp_queue_id = rule->queue_id;
5831         rule->queue_id = queue_id;
5832         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5833         if (ret) {
5834                 rule->queue_id = tmp_queue_id;
5835                 return ret;
5836         }
5837
5838         return rule->location;
5839 }
5840
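/* Expire aRFS rules in two phases: under fd_rule_lock, move rules that
 * rps_may_expire_flow() reports as idle onto a local list and release their
 * bitmap slots; then, with the lock dropped, invalidate each TCAM entry and
 * free the rule.
 */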
5841 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5842 {
5843 #ifdef CONFIG_RFS_ACCEL
5844         struct hnae3_handle *handle = &hdev->vport[0].nic;
5845         struct hclge_fd_rule *rule;
5846         struct hlist_node *node;
5847         HLIST_HEAD(del_list);
5848
5849         spin_lock_bh(&hdev->fd_rule_lock);
5850         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5851                 spin_unlock_bh(&hdev->fd_rule_lock);
5852                 return;
5853         }
5854         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5855                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5856                                         rule->flow_id, rule->location)) {
5857                         hlist_del_init(&rule->rule_node);
5858                         hlist_add_head(&rule->rule_node, &del_list);
5859                         hdev->hclge_fd_rule_num--;
5860                         clear_bit(rule->location, hdev->fd_bmap);
5861                 }
5862         }
5863         spin_unlock_bh(&hdev->fd_rule_lock);
5864
5865         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5866                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5867                                      rule->location, NULL, false);
5868                 kfree(rule);
5869         }
5870 #endif
5871 }
5872
5873 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5874 {
5875 #ifdef CONFIG_RFS_ACCEL
5876         struct hclge_vport *vport = hclge_get_vport(handle);
5877         struct hclge_dev *hdev = vport->back;
5878
5879         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5880                 hclge_del_all_fd_entries(handle, true);
5881 #endif
5882 }
5883
5884 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5885 {
5886         struct hclge_vport *vport = hclge_get_vport(handle);
5887         struct hclge_dev *hdev = vport->back;
5888
5889         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5890                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5891 }
5892
5893 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5894 {
5895         struct hclge_vport *vport = hclge_get_vport(handle);
5896         struct hclge_dev *hdev = vport->back;
5897
5898         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5899 }
5900
5901 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5902 {
5903         struct hclge_vport *vport = hclge_get_vport(handle);
5904         struct hclge_dev *hdev = vport->back;
5905
5906         return hdev->rst_stats.hw_reset_done_cnt;
5907 }
5908
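/* Enable or disable the flow director. Disabling removes the rules from
 * hardware; the software rule list is freed only when the active rules were
 * added by aRFS, so user-configured rules can be restored on re-enable.
 */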
5909 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5910 {
5911         struct hclge_vport *vport = hclge_get_vport(handle);
5912         struct hclge_dev *hdev = vport->back;
5913         bool clear;
5914
5915         hdev->fd_en = enable;
5916         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5917         if (!enable)
5918                 hclge_del_all_fd_entries(handle, clear);
5919         else
5920                 hclge_restore_fd_entries(handle);
5921 }
5922
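/* Enable or disable the MAC: TX/RX, padding, FCS insertion/stripping and
 * oversize truncation are toggled together; the 1588 and loopback bits are
 * always cleared here.
 */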
5923 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5924 {
5925         struct hclge_desc desc;
5926         struct hclge_config_mac_mode_cmd *req =
5927                 (struct hclge_config_mac_mode_cmd *)desc.data;
5928         u32 loop_en = 0;
5929         int ret;
5930
5931         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5932         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5933         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5934         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5935         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5936         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5937         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5938         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5939         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5940         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5941         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5942         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5943         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5944         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5945         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5946         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5947
5948         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5949         if (ret)
5950                 dev_err(&hdev->pdev->dev,
5951                         "mac enable fail, ret =%d.\n", ret);
5952 }
5953
5954 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5955 {
5956         struct hclge_config_mac_mode_cmd *req;
5957         struct hclge_desc desc;
5958         u32 loop_en;
5959         int ret;
5960
5961         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5962         /* 1 Read out the MAC mode config at first */
5963         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5964         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5965         if (ret) {
5966                 dev_err(&hdev->pdev->dev,
5967                         "mac loopback get fail, ret =%d.\n", ret);
5968                 return ret;
5969         }
5970
5971         /* 2 Then setup the loopback flag */
5972         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5973         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5974         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5975         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5976
5977         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5978
5979         /* 3 Config mac work mode with loopback flag
5980          * and its original configure parameters
5981          */
5982         hclge_cmd_reuse_desc(&desc, false);
5983         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5984         if (ret)
5985                 dev_err(&hdev->pdev->dev,
5986                         "mac loopback set fail, ret =%d.\n", ret);
5987         return ret;
5988 }
5989
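/* Configure serdes loopback: issue the loopback command, poll the command
 * result until the firmware reports completion (up to HCLGE_SERDES_RETRY_NUM
 * tries, HCLGE_SERDES_RETRY_MS apart), then re-program the MAC and wait for
 * the link state to match the requested loopback state.
 */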
5990 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5991                                      enum hnae3_loop loop_mode)
5992 {
5993 #define HCLGE_SERDES_RETRY_MS   10
5994 #define HCLGE_SERDES_RETRY_NUM  100
5995
5996 #define HCLGE_MAC_LINK_STATUS_MS   10
5997 #define HCLGE_MAC_LINK_STATUS_NUM  100
5998 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5999 #define HCLGE_MAC_LINK_STATUS_UP   1
6000
6001         struct hclge_serdes_lb_cmd *req;
6002         struct hclge_desc desc;
6003         int mac_link_ret = 0;
6004         int ret, i = 0;
6005         u8 loop_mode_b;
6006
6007         req = (struct hclge_serdes_lb_cmd *)desc.data;
6008         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6009
6010         switch (loop_mode) {
6011         case HNAE3_LOOP_SERIAL_SERDES:
6012                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6013                 break;
6014         case HNAE3_LOOP_PARALLEL_SERDES:
6015                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6016                 break;
6017         default:
6018                 dev_err(&hdev->pdev->dev,
6019                         "unsupported serdes loopback mode %d\n", loop_mode);
6020                 return -ENOTSUPP;
6021         }
6022
6023         if (en) {
6024                 req->enable = loop_mode_b;
6025                 req->mask = loop_mode_b;
6026                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6027         } else {
6028                 req->mask = loop_mode_b;
6029                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6030         }
6031
6032         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6033         if (ret) {
6034                 dev_err(&hdev->pdev->dev,
6035                         "serdes loopback set fail, ret = %d\n", ret);
6036                 return ret;
6037         }
6038
6039         do {
6040                 msleep(HCLGE_SERDES_RETRY_MS);
6041                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6042                                            true);
6043                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6044                 if (ret) {
6045                         dev_err(&hdev->pdev->dev,
6046                                 "serdes loopback get, ret = %d\n", ret);
6047                         return ret;
6048                 }
6049         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6050                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6051
6052         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6053                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6054                 return -EBUSY;
6055         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6056                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6057                 return -EIO;
6058         }
6059
6060         hclge_cfg_mac_mode(hdev, en);
6061
6062         i = 0;
6063         do {
6064                 /* serdes internal loopback, independent of the network cable */
6065                 msleep(HCLGE_MAC_LINK_STATUS_MS);
6066                 ret = hclge_get_mac_link_status(hdev);
6067                 if (ret == mac_link_ret)
6068                         return 0;
6069         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6070
6071         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6072
6073         return -EBUSY;
6074 }
6075
6076 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6077                             int stream_id, bool enable)
6078 {
6079         struct hclge_desc desc;
6080         struct hclge_cfg_com_tqp_queue_cmd *req =
6081                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6082         int ret;
6083
6084         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6085         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6086         req->stream_id = cpu_to_le16(stream_id);
6087         if (enable)
6088                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6089
6090         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6091         if (ret)
6092                 dev_err(&hdev->pdev->dev,
6093                         "Tqp enable fail, status =%d.\n", ret);
6094         return ret;
6095 }
6096
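/* Dispatch a loopback request (app/MAC or serdes) and then enable or disable
 * all of the vport's TQPs so the queues match the loopback state.
 */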
6097 static int hclge_set_loopback(struct hnae3_handle *handle,
6098                               enum hnae3_loop loop_mode, bool en)
6099 {
6100         struct hclge_vport *vport = hclge_get_vport(handle);
6101         struct hnae3_knic_private_info *kinfo;
6102         struct hclge_dev *hdev = vport->back;
6103         int i, ret;
6104
6105         switch (loop_mode) {
6106         case HNAE3_LOOP_APP:
6107                 ret = hclge_set_app_loopback(hdev, en);
6108                 break;
6109         case HNAE3_LOOP_SERIAL_SERDES:
6110         case HNAE3_LOOP_PARALLEL_SERDES:
6111                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6112                 break;
6113         default:
6114                 ret = -ENOTSUPP;
6115                 dev_err(&hdev->pdev->dev,
6116                         "loop_mode %d is not supported\n", loop_mode);
6117                 break;
6118         }
6119
6120         if (ret)
6121                 return ret;
6122
6123         kinfo = &vport->nic.kinfo;
6124         for (i = 0; i < kinfo->num_tqps; i++) {
6125                 ret = hclge_tqp_enable(hdev, i, 0, en);
6126                 if (ret)
6127                         return ret;
6128         }
6129
6130         return 0;
6131 }
6132
6133 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6134 {
6135         struct hclge_vport *vport = hclge_get_vport(handle);
6136         struct hnae3_knic_private_info *kinfo;
6137         struct hnae3_queue *queue;
6138         struct hclge_tqp *tqp;
6139         int i;
6140
6141         kinfo = &vport->nic.kinfo;
6142         for (i = 0; i < kinfo->num_tqps; i++) {
6143                 queue = handle->kinfo.tqp[i];
6144                 tqp = container_of(queue, struct hclge_tqp, q);
6145                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6146         }
6147 }
6148
6149 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6150 {
6151         struct hclge_vport *vport = hclge_get_vport(handle);
6152         struct hclge_dev *hdev = vport->back;
6153
6154         if (enable) {
6155                 mod_timer(&hdev->service_timer, jiffies + HZ);
6156         } else {
6157                 del_timer_sync(&hdev->service_timer);
6158                 cancel_work_sync(&hdev->service_task);
6159                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6160         }
6161 }
6162
6163 static int hclge_ae_start(struct hnae3_handle *handle)
6164 {
6165         struct hclge_vport *vport = hclge_get_vport(handle);
6166         struct hclge_dev *hdev = vport->back;
6167
6168         /* mac enable */
6169         hclge_cfg_mac_mode(hdev, true);
6170         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6171         hdev->hw.mac.link = 0;
6172
6173         /* reset tqp stats */
6174         hclge_reset_tqp_stats(handle);
6175
6176         hclge_mac_start_phy(hdev);
6177
6178         return 0;
6179 }
6180
6181 static void hclge_ae_stop(struct hnae3_handle *handle)
6182 {
6183         struct hclge_vport *vport = hclge_get_vport(handle);
6184         struct hclge_dev *hdev = vport->back;
6185         int i;
6186
6187         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6188
6189         hclge_clear_arfs_rules(handle);
6190
6191         /* If it is not PF reset, the firmware will disable the MAC,
6192          * so it only needs to stop the phy here.
6193          */
6194         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6195             hdev->reset_type != HNAE3_FUNC_RESET) {
6196                 hclge_mac_stop_phy(hdev);
6197                 return;
6198         }
6199
6200         for (i = 0; i < handle->kinfo.num_tqps; i++)
6201                 hclge_reset_tqp(handle, i);
6202
6203         /* Mac disable */
6204         hclge_cfg_mac_mode(hdev, false);
6205
6206         hclge_mac_stop_phy(hdev);
6207
6208         /* reset tqp stats */
6209         hclge_reset_tqp_stats(handle);
6210         hclge_update_link_status(hdev);
6211 }
6212
6213 int hclge_vport_start(struct hclge_vport *vport)
6214 {
6215         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6216         vport->last_active_jiffies = jiffies;
6217         return 0;
6218 }
6219
6220 void hclge_vport_stop(struct hclge_vport *vport)
6221 {
6222         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6223 }
6224
6225 static int hclge_client_start(struct hnae3_handle *handle)
6226 {
6227         struct hclge_vport *vport = hclge_get_vport(handle);
6228
6229         return hclge_vport_start(vport);
6230 }
6231
6232 static void hclge_client_stop(struct hnae3_handle *handle)
6233 {
6234         struct hclge_vport *vport = hclge_get_vport(handle);
6235
6236         hclge_vport_stop(vport);
6237 }
6238
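/* Translate a MAC/VLAN table command response into an errno. A non-zero cmdq
 * status is always -EIO; otherwise the per-opcode resp_code is decoded: for
 * ADD, overflow codes map to -ENOSPC; for REMOVE/LOOKUP, resp_code 1 means
 * the entry was not found (-ENOENT).
 */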
6239 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6240                                          u16 cmdq_resp, u8  resp_code,
6241                                          enum hclge_mac_vlan_tbl_opcode op)
6242 {
6243         struct hclge_dev *hdev = vport->back;
6244         int return_status = -EIO;
6245
6246         if (cmdq_resp) {
6247                 dev_err(&hdev->pdev->dev,
6248                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6249                         cmdq_resp);
6250                 return -EIO;
6251         }
6252
6253         if (op == HCLGE_MAC_VLAN_ADD) {
6254                 if ((!resp_code) || (resp_code == 1)) {
6255                         return_status = 0;
6256                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6257                         return_status = -ENOSPC;
6258                         dev_err(&hdev->pdev->dev,
6259                                 "add mac addr failed for uc_overflow.\n");
6260                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6261                         return_status = -ENOSPC;
6262                         dev_err(&hdev->pdev->dev,
6263                                 "add mac addr failed for mc_overflow.\n");
6264                 } else {
6265                         dev_err(&hdev->pdev->dev,
6266                                 "add mac addr failed for undefined, code=%d.\n",
6267                                 resp_code);
6268                 }
6269         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6270                 if (!resp_code) {
6271                         return_status = 0;
6272                 } else if (resp_code == 1) {
6273                         return_status = -ENOENT;
6274                         dev_dbg(&hdev->pdev->dev,
6275                                 "remove mac addr failed for miss.\n");
6276                 } else {
6277                         dev_err(&hdev->pdev->dev,
6278                                 "remove mac addr failed for undefined, code=%d.\n",
6279                                 resp_code);
6280                 }
6281         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6282                 if (!resp_code) {
6283                         return_status = 0;
6284                 } else if (resp_code == 1) {
6285                         return_status = -ENOENT;
6286                         dev_dbg(&hdev->pdev->dev,
6287                                 "lookup mac addr failed for miss.\n");
6288                 } else {
6289                         dev_err(&hdev->pdev->dev,
6290                                 "lookup mac addr failed for undefined, code=%d.\n",
6291                                 resp_code);
6292                 }
6293         } else {
6294                 return_status = -EINVAL;
6295                 dev_err(&hdev->pdev->dev,
6296                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6297                         op);
6298         }
6299
6300         return return_status;
6301 }
6302
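/* Set or clear a VF's bit in the multicast entry's function bitmap. The
 * bitmap spans two descriptors: VF ids 0-191 live in desc[1] (6 data words of
 * 32 bits each), the remaining ids in desc[2].
 */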
6303 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6304 {
6305 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6306
6307         int word_num;
6308         int bit_num;
6309
6310         if (vfid > 255 || vfid < 0)
6311                 return -EIO;
6312
6313         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6314                 word_num = vfid / 32;
6315                 bit_num  = vfid % 32;
6316                 if (clr)
6317                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6318                 else
6319                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6320         } else {
6321                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6322                 bit_num  = vfid % 32;
6323                 if (clr)
6324                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6325                 else
6326                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6327         }
6328
6329         return 0;
6330 }
6331
6332 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6333 {
6334 #define HCLGE_DESC_NUMBER 3
6335 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6336         int i, j;
6337
6338         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6339                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6340                         if (desc[i].data[j])
6341                                 return false;
6342
6343         return true;
6344 }
6345
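/* Pack a MAC address into a table entry: bytes 0-3 form mac_addr_hi32 and
 * bytes 4-5 form mac_addr_lo16, in little-endian byte order. Multicast
 * entries additionally get the MC entry type and mc_mac_en bits set.
 */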
6346 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6347                                    const u8 *addr, bool is_mc)
6348 {
6349         const unsigned char *mac_addr = addr;
6350         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6351                        (mac_addr[0]) | (mac_addr[1] << 8);
6352         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6353
6354         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6355         if (is_mc) {
6356                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6357                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6358         }
6359
6360         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6361         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6362 }
6363
6364 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6365                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6366 {
6367         struct hclge_dev *hdev = vport->back;
6368         struct hclge_desc desc;
6369         u8 resp_code;
6370         u16 retval;
6371         int ret;
6372
6373         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6374
6375         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6376
6377         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6378         if (ret) {
6379                 dev_err(&hdev->pdev->dev,
6380                         "del mac addr failed for cmd_send, ret =%d.\n",
6381                         ret);
6382                 return ret;
6383         }
6384         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6385         retval = le16_to_cpu(desc.retval);
6386
6387         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6388                                              HCLGE_MAC_VLAN_REMOVE);
6389 }
6390
6391 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6392                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6393                                      struct hclge_desc *desc,
6394                                      bool is_mc)
6395 {
6396         struct hclge_dev *hdev = vport->back;
6397         u8 resp_code;
6398         u16 retval;
6399         int ret;
6400
6401         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6402         if (is_mc) {
6403                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6404                 memcpy(desc[0].data,
6405                        req,
6406                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6407                 hclge_cmd_setup_basic_desc(&desc[1],
6408                                            HCLGE_OPC_MAC_VLAN_ADD,
6409                                            true);
6410                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6411                 hclge_cmd_setup_basic_desc(&desc[2],
6412                                            HCLGE_OPC_MAC_VLAN_ADD,
6413                                            true);
6414                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6415         } else {
6416                 memcpy(desc[0].data,
6417                        req,
6418                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6419                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6420         }
6421         if (ret) {
6422                 dev_err(&hdev->pdev->dev,
6423                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6424                         ret);
6425                 return ret;
6426         }
6427         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6428         retval = le16_to_cpu(desc[0].retval);
6429
6430         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6431                                              HCLGE_MAC_VLAN_LKUP);
6432 }
6433
6434 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6435                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6436                                   struct hclge_desc *mc_desc)
6437 {
6438         struct hclge_dev *hdev = vport->back;
6439         int cfg_status;
6440         u8 resp_code;
6441         u16 retval;
6442         int ret;
6443
6444         if (!mc_desc) {
6445                 struct hclge_desc desc;
6446
6447                 hclge_cmd_setup_basic_desc(&desc,
6448                                            HCLGE_OPC_MAC_VLAN_ADD,
6449                                            false);
6450                 memcpy(desc.data, req,
6451                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6452                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6453                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6454                 retval = le16_to_cpu(desc.retval);
6455
6456                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6457                                                            resp_code,
6458                                                            HCLGE_MAC_VLAN_ADD);
6459         } else {
6460                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6461                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6462                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6463                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6464                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6465                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6466                 memcpy(mc_desc[0].data, req,
6467                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6468                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6469                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6470                 retval = le16_to_cpu(mc_desc[0].retval);
6471
6472                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6473                                                            resp_code,
6474                                                            HCLGE_MAC_VLAN_ADD);
6475         }
6476
6477         if (ret) {
6478                 dev_err(&hdev->pdev->dev,
6479                         "add mac addr failed for cmd_send, ret =%d.\n",
6480                         ret);
6481                 return ret;
6482         }
6483
6484         return cfg_status;
6485 }
6486
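/* Allocate the unicast MAC/VLAN (UMV) table space from firmware and split it
 * between the PF and its VFs: each function gets max_umv_size /
 * (num_req_vfs + 2) private entries, and the remainder is pooled as shared
 * space on top of one private quota. For example (illustrative numbers), with
 * 256 allocated entries and 6 requested VFs, each function gets 32 private
 * entries and the shared pool is 32.
 */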
6487 static int hclge_init_umv_space(struct hclge_dev *hdev)
6488 {
6489         u16 allocated_size = 0;
6490         int ret;
6491
6492         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6493                                   true);
6494         if (ret)
6495                 return ret;
6496
6497         if (allocated_size < hdev->wanted_umv_size)
6498                 dev_warn(&hdev->pdev->dev,
6499                          "Alloc umv space failed, want %d, get %d\n",
6500                          hdev->wanted_umv_size, allocated_size);
6501
6502         mutex_init(&hdev->umv_mutex);
6503         hdev->max_umv_size = allocated_size;
6504         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6505          * preserve some unicast mac vlan table entries shared by pf
6506          * and its vfs.
6507          */
6508         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6509         hdev->share_umv_size = hdev->priv_umv_size +
6510                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6511
6512         return 0;
6513 }
6514
6515 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6516 {
6517         int ret;
6518
6519         if (hdev->max_umv_size > 0) {
6520                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6521                                           false);
6522                 if (ret)
6523                         return ret;
6524                 hdev->max_umv_size = 0;
6525         }
6526         mutex_destroy(&hdev->umv_mutex);
6527
6528         return 0;
6529 }
6530
6531 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6532                                u16 *allocated_size, bool is_alloc)
6533 {
6534         struct hclge_umv_spc_alc_cmd *req;
6535         struct hclge_desc desc;
6536         int ret;
6537
6538         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6539         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6540         if (!is_alloc)
6541                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6542
6543         req->space_size = cpu_to_le32(space_size);
6544
6545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6546         if (ret) {
6547                 dev_err(&hdev->pdev->dev,
6548                         "%s umv space failed for cmd_send, ret =%d\n",
6549                         is_alloc ? "allocate" : "free", ret);
6550                 return ret;
6551         }
6552
6553         if (is_alloc && allocated_size)
6554                 *allocated_size = le32_to_cpu(desc.data[1]);
6555
6556         return 0;
6557 }
6558
6559 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6560 {
6561         struct hclge_vport *vport;
6562         int i;
6563
6564         for (i = 0; i < hdev->num_alloc_vport; i++) {
6565                 vport = &hdev->vport[i];
6566                 vport->used_umv_num = 0;
6567         }
6568
6569         mutex_lock(&hdev->umv_mutex);
6570         hdev->share_umv_size = hdev->priv_umv_size +
6571                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6572         mutex_unlock(&hdev->umv_mutex);
6573 }
6574
6575 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6576 {
6577         struct hclge_dev *hdev = vport->back;
6578         bool is_full;
6579
6580         mutex_lock(&hdev->umv_mutex);
6581         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6582                    hdev->share_umv_size == 0);
6583         mutex_unlock(&hdev->umv_mutex);
6584
6585         return is_full;
6586 }
6587
6588 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6589 {
6590         struct hclge_dev *hdev = vport->back;
6591
6592         mutex_lock(&hdev->umv_mutex);
6593         if (is_free) {
6594                 if (vport->used_umv_num > hdev->priv_umv_size)
6595                         hdev->share_umv_size++;
6596
6597                 if (vport->used_umv_num > 0)
6598                         vport->used_umv_num--;
6599         } else {
6600                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6601                     hdev->share_umv_size > 0)
6602                         hdev->share_umv_size--;
6603                 vport->used_umv_num++;
6604         }
6605         mutex_unlock(&hdev->umv_mutex);
6606 }
6607
6608 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6609                              const unsigned char *addr)
6610 {
6611         struct hclge_vport *vport = hclge_get_vport(handle);
6612
6613         return hclge_add_uc_addr_common(vport, addr);
6614 }
6615
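/* Add a unicast address for a vport. The address is first looked up in the
 * MAC/VLAN table; it is added only when the lookup misses (-ENOENT) and the
 * vport still has private or shared UMV space, in which case the UMV
 * accounting is updated as well.
 */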
6616 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6617                              const unsigned char *addr)
6618 {
6619         struct hclge_dev *hdev = vport->back;
6620         struct hclge_mac_vlan_tbl_entry_cmd req;
6621         struct hclge_desc desc;
6622         u16 egress_port = 0;
6623         int ret;
6624
6625         /* mac addr check */
6626         if (is_zero_ether_addr(addr) ||
6627             is_broadcast_ether_addr(addr) ||
6628             is_multicast_ether_addr(addr)) {
6629                 dev_err(&hdev->pdev->dev,
6630                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6631                          addr, is_zero_ether_addr(addr),
6632                          is_broadcast_ether_addr(addr),
6633                          is_multicast_ether_addr(addr));
6634                 return -EINVAL;
6635         }
6636
6637         memset(&req, 0, sizeof(req));
6638
6639         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6640                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6641
6642         req.egress_port = cpu_to_le16(egress_port);
6643
6644         hclge_prepare_mac_addr(&req, addr, false);
6645
6646         /* Lookup the mac address in the mac_vlan table, and add
6647          * it if the entry does not exist. Duplicate unicast entries
6648          * are not allowed in the mac vlan table.
6649          */
6650         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6651         if (ret == -ENOENT) {
6652                 if (!hclge_is_umv_space_full(vport)) {
6653                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6654                         if (!ret)
6655                                 hclge_update_umv_space(vport, false);
6656                         return ret;
6657                 }
6658
6659                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6660                         hdev->priv_umv_size);
6661
6662                 return -ENOSPC;
6663         }
6664
6665         /* check if we just hit the duplicate */
6666         if (!ret) {
6667                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6668                          vport->vport_id, addr);
6669                 return 0;
6670         }
6671
6672         dev_err(&hdev->pdev->dev,
6673                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6674                 addr);
6675
6676         return ret;
6677 }
6678
6679 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6680                             const unsigned char *addr)
6681 {
6682         struct hclge_vport *vport = hclge_get_vport(handle);
6683
6684         return hclge_rm_uc_addr_common(vport, addr);
6685 }
6686
6687 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6688                             const unsigned char *addr)
6689 {
6690         struct hclge_dev *hdev = vport->back;
6691         struct hclge_mac_vlan_tbl_entry_cmd req;
6692         int ret;
6693
6694         /* mac addr check */
6695         if (is_zero_ether_addr(addr) ||
6696             is_broadcast_ether_addr(addr) ||
6697             is_multicast_ether_addr(addr)) {
6698                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6699                         addr);
6700                 return -EINVAL;
6701         }
6702
6703         memset(&req, 0, sizeof(req));
6704         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6705         hclge_prepare_mac_addr(&req, addr, false);
6706         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6707         if (!ret)
6708                 hclge_update_umv_space(vport, true);
6709
6710         return ret;
6711 }
6712
6713 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6714                              const unsigned char *addr)
6715 {
6716         struct hclge_vport *vport = hclge_get_vport(handle);
6717
6718         return hclge_add_mc_addr_common(vport, addr);
6719 }
6720
6721 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6722                              const unsigned char *addr)
6723 {
6724         struct hclge_dev *hdev = vport->back;
6725         struct hclge_mac_vlan_tbl_entry_cmd req;
6726         struct hclge_desc desc[3];
6727         int status;
6728
6729         /* mac addr check */
6730         if (!is_multicast_ether_addr(addr)) {
6731                 dev_err(&hdev->pdev->dev,
6732                         "Add mc mac err! invalid mac:%pM.\n",
6733                          addr);
6734                 return -EINVAL;
6735         }
6736         memset(&req, 0, sizeof(req));
6737         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6738         hclge_prepare_mac_addr(&req, addr, true);
6739         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6740         if (status) {
6741                 /* This mac addr does not exist, add a new entry for it */
6742                 memset(desc[0].data, 0, sizeof(desc[0].data));
6743                 memset(desc[1].data, 0, sizeof(desc[1].data));
6744                 memset(desc[2].data, 0, sizeof(desc[2].data));
6745         }
6746         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6747         if (status)
6748                 return status;
6749         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6750
6751         if (status == -ENOSPC)
6752                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6753
6754         return status;
6755 }
6756
6757 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6758                             const unsigned char *addr)
6759 {
6760         struct hclge_vport *vport = hclge_get_vport(handle);
6761
6762         return hclge_rm_mc_addr_common(vport, addr);
6763 }
6764
6765 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6766                             const unsigned char *addr)
6767 {
6768         struct hclge_dev *hdev = vport->back;
6769         struct hclge_mac_vlan_tbl_entry_cmd req;
6770         enum hclge_cmd_status status;
6771         struct hclge_desc desc[3];
6772
6773         /* mac addr check */
6774         if (!is_multicast_ether_addr(addr)) {
6775                 dev_dbg(&hdev->pdev->dev,
6776                         "Remove mc mac err! invalid mac:%pM.\n",
6777                          addr);
6778                 return -EINVAL;
6779         }
6780
6781         memset(&req, 0, sizeof(req));
6782         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6783         hclge_prepare_mac_addr(&req, addr, true);
6784         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6785         if (!status) {
6786                 /* This mac addr exists, remove this handle's VFID for it */
6787                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6788                 if (status)
6789                         return status;
6790
6791                 if (hclge_is_all_function_id_zero(desc))
6792                         /* All the vfids are zero, so delete this entry */
6793                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6794                 else
6795                         /* Not all vfids are zero, update the vfid */
6796                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6797
6798         } else {
6799                 /* Maybe this mac address is in mta table, but it cannot be
6800                  * deleted here because an entry of mta represents an address
6801                  * range rather than a specific address. The delete action on
6802                  * all entries will take effect in update_mta_status called by
6803                  * hns3_nic_set_rx_mode.
6804                  */
6805                 status = 0;
6806         }
6807
6808         return status;
6809 }
6810
6811 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6812                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6813 {
6814         struct hclge_vport_mac_addr_cfg *mac_cfg;
6815         struct list_head *list;
6816
6817         if (!vport->vport_id)
6818                 return;
6819
6820         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6821         if (!mac_cfg)
6822                 return;
6823
6824         mac_cfg->hd_tbl_status = true;
6825         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6826
6827         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6828                &vport->uc_mac_list : &vport->mc_mac_list;
6829
6830         list_add_tail(&mac_cfg->node, list);
6831 }
6832
6833 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6834                               bool is_write_tbl,
6835                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6836 {
6837         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6838         struct list_head *list;
6839         bool uc_flag, mc_flag;
6840
6841         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6842                &vport->uc_mac_list : &vport->mc_mac_list;
6843
6844         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6845         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6846
6847         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6848                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6849                         if (uc_flag && mac_cfg->hd_tbl_status)
6850                                 hclge_rm_uc_addr_common(vport, mac_addr);
6851
6852                         if (mc_flag && mac_cfg->hd_tbl_status)
6853                                 hclge_rm_mc_addr_common(vport, mac_addr);
6854
6855                         list_del(&mac_cfg->node);
6856                         kfree(mac_cfg);
6857                         break;
6858                 }
6859         }
6860 }
6861
6862 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6863                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6864 {
6865         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6866         struct list_head *list;
6867
6868         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6869                &vport->uc_mac_list : &vport->mc_mac_list;
6870
6871         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6872                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6873                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6874
6875                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6876                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6877
6878                 mac_cfg->hd_tbl_status = false;
6879                 if (is_del_list) {
6880                         list_del(&mac_cfg->node);
6881                         kfree(mac_cfg);
6882                 }
6883         }
6884 }
6885
6886 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6887 {
6888         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6889         struct hclge_vport *vport;
6890         int i;
6891
6892         mutex_lock(&hdev->vport_cfg_mutex);
6893         for (i = 0; i < hdev->num_alloc_vport; i++) {
6894                 vport = &hdev->vport[i];
6895                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6896                         list_del(&mac->node);
6897                         kfree(mac);
6898                 }
6899
6900                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6901                         list_del(&mac->node);
6902                         kfree(mac);
6903                 }
6904         }
6905         mutex_unlock(&hdev->vport_cfg_mutex);
6906 }
6907
6908 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6909                                               u16 cmdq_resp, u8 resp_code)
6910 {
6911 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6912 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6913 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6914 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6915
6916         int return_status;
6917
6918         if (cmdq_resp) {
6919                 dev_err(&hdev->pdev->dev,
6920                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6921                         cmdq_resp);
6922                 return -EIO;
6923         }
6924
6925         switch (resp_code) {
6926         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6927         case HCLGE_ETHERTYPE_ALREADY_ADD:
6928                 return_status = 0;
6929                 break;
6930         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6931                 dev_err(&hdev->pdev->dev,
6932                         "add mac ethertype failed for manager table overflow.\n");
6933                 return_status = -EIO;
6934                 break;
6935         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6936                 dev_err(&hdev->pdev->dev,
6937                         "add mac ethertype failed for key conflict.\n");
6938                 return_status = -EIO;
6939                 break;
6940         default:
6941                 dev_err(&hdev->pdev->dev,
6942                         "add mac ethertype failed for undefined, code=%d.\n",
6943                         resp_code);
6944                 return_status = -EIO;
6945         }
6946
6947         return return_status;
6948 }
6949
6950 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6951                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6952 {
6953         struct hclge_desc desc;
6954         u8 resp_code;
6955         u16 retval;
6956         int ret;
6957
6958         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6959         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6960
6961         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6962         if (ret) {
6963                 dev_err(&hdev->pdev->dev,
6964                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6965                         ret);
6966                 return ret;
6967         }
6968
6969         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6970         retval = le16_to_cpu(desc.retval);
6971
6972         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6973 }
6974
6975 static int init_mgr_tbl(struct hclge_dev *hdev)
6976 {
6977         int ret;
6978         int i;
6979
6980         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6981                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6982                 if (ret) {
6983                         dev_err(&hdev->pdev->dev,
6984                                 "add mac ethertype failed, ret =%d.\n",
6985                                 ret);
6986                         return ret;
6987                 }
6988         }
6989
6990         return 0;
6991 }
6992
6993 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6994 {
6995         struct hclge_vport *vport = hclge_get_vport(handle);
6996         struct hclge_dev *hdev = vport->back;
6997
6998         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6999 }
7000
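     /* Replace the PF MAC address: remove the old unicast entry (unless this
      * is the first configuration), add the new one, try to restore the old
      * address if that fails, then update the pause frame address and record
      * the new address in hdev->hw.mac.
      */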
7001 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7002                               bool is_first)
7003 {
7004         const unsigned char *new_addr = (const unsigned char *)p;
7005         struct hclge_vport *vport = hclge_get_vport(handle);
7006         struct hclge_dev *hdev = vport->back;
7007         int ret;
7008
7009         /* mac addr check */
7010         if (is_zero_ether_addr(new_addr) ||
7011             is_broadcast_ether_addr(new_addr) ||
7012             is_multicast_ether_addr(new_addr)) {
7013                 dev_err(&hdev->pdev->dev,
7014                         "Change uc mac err! invalid mac:%p.\n",
7015                          new_addr);
7016                 return -EINVAL;
7017         }
7018
7019         if ((!is_first || is_kdump_kernel()) &&
7020             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7021                 dev_warn(&hdev->pdev->dev,
7022                          "remove old uc mac address fail.\n");
7023
7024         ret = hclge_add_uc_addr(handle, new_addr);
7025         if (ret) {
7026                 dev_err(&hdev->pdev->dev,
7027                         "add uc mac address fail, ret =%d.\n",
7028                         ret);
7029
7030                 if (!is_first &&
7031                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7032                         dev_err(&hdev->pdev->dev,
7033                                 "restore uc mac address fail.\n");
7034
7035                 return -EIO;
7036         }
7037
7038         ret = hclge_pause_addr_cfg(hdev, new_addr);
7039         if (ret) {
7040                 dev_err(&hdev->pdev->dev,
7041                         "configure mac pause address fail, ret =%d.\n",
7042                         ret);
7043                 return -EIO;
7044         }
7045
7046         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7047
7048         return 0;
7049 }
7050
7051 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7052                           int cmd)
7053 {
7054         struct hclge_vport *vport = hclge_get_vport(handle);
7055         struct hclge_dev *hdev = vport->back;
7056
7057         if (!hdev->hw.mac.phydev)
7058                 return -EOPNOTSUPP;
7059
7060         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7061 }
7062
7063 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7064                                       u8 fe_type, bool filter_en, u8 vf_id)
7065 {
7066         struct hclge_vlan_filter_ctrl_cmd *req;
7067         struct hclge_desc desc;
7068         int ret;
7069
7070         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7071
7072         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7073         req->vlan_type = vlan_type;
7074         req->vlan_fe = filter_en ? fe_type : 0;
7075         req->vf_id = vf_id;
7076
7077         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7078         if (ret)
7079                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7080                         ret);
7081
7082         return ret;
7083 }
7084
7085 #define HCLGE_FILTER_TYPE_VF            0
7086 #define HCLGE_FILTER_TYPE_PORT          1
7087 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7088 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7089 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7090 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7091 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7092 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7093                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7094 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7095                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7096
7097 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7098 {
7099         struct hclge_vport *vport = hclge_get_vport(handle);
7100         struct hclge_dev *hdev = vport->back;
7101
7102         if (hdev->pdev->revision >= 0x21) {
7103                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7104                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7105                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7106                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7107         } else {
7108                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7109                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7110                                            0);
7111         }
7112         if (enable)
7113                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7114         else
7115                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7116 }
7117
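     /* Add or remove one vlan id in the per-VF vlan filter table. The two
      * descriptors carry a bitmap of affected VFs; a "no entry" response on
      * add means the VF vlan table is full and firmware has disabled the
      * VF vlan filter.
      */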
7118 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7119                                     bool is_kill, u16 vlan, u8 qos,
7120                                     __be16 proto)
7121 {
7122 #define HCLGE_MAX_VF_BYTES  16
7123         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7124         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7125         struct hclge_desc desc[2];
7126         u8 vf_byte_val;
7127         u8 vf_byte_off;
7128         int ret;
7129
7130         /* If the vf vlan table is full, firmware will disable the vf vlan
7131          * filter, so adding a new vlan id is neither possible nor necessary.
7132          */
7133         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7134                 return 0;
7135
7136         hclge_cmd_setup_basic_desc(&desc[0],
7137                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7138         hclge_cmd_setup_basic_desc(&desc[1],
7139                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7140
7141         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7142
7143         vf_byte_off = vfid / 8;
7144         vf_byte_val = 1 << (vfid % 8);
7145
7146         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7147         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7148
7149         req0->vlan_id  = cpu_to_le16(vlan);
7150         req0->vlan_cfg = is_kill;
7151
7152         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7153                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7154         else
7155                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7156
7157         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7158         if (ret) {
7159                 dev_err(&hdev->pdev->dev,
7160                         "Send vf vlan command fail, ret =%d.\n",
7161                         ret);
7162                 return ret;
7163         }
7164
7165         if (!is_kill) {
7166 #define HCLGE_VF_VLAN_NO_ENTRY  2
7167                 if (!req0->resp_code || req0->resp_code == 1)
7168                         return 0;
7169
7170                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7171                         set_bit(vfid, hdev->vf_vlan_full);
7172                         dev_warn(&hdev->pdev->dev,
7173                                  "vf vlan table is full, vf vlan filter is disabled\n");
7174                         return 0;
7175                 }
7176
7177                 dev_err(&hdev->pdev->dev,
7178                         "Add vf vlan filter fail, ret =%d.\n",
7179                         req0->resp_code);
7180         } else {
7181 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7182                 if (!req0->resp_code)
7183                         return 0;
7184
7185                 /* The vf vlan filter is disabled when the vf vlan table is
7186                  * full, so new vlan ids are not added to the vf vlan table.
7187                  * Just return 0 without a warning to avoid flooding the log
7188                  * with verbose messages on unload.
7189                  */
7190                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7191                         return 0;
7192
7193                 dev_err(&hdev->pdev->dev,
7194                         "Kill vf vlan filter fail, ret =%d.\n",
7195                         req0->resp_code);
7196         }
7197
7198         return -EIO;
7199 }
7200
7201 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7202                                       u16 vlan_id, bool is_kill)
7203 {
7204         struct hclge_vlan_filter_pf_cfg_cmd *req;
7205         struct hclge_desc desc;
7206         u8 vlan_offset_byte_val;
7207         u8 vlan_offset_byte;
7208         u8 vlan_offset_160;
7209         int ret;
7210
7211         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7212
7213         vlan_offset_160 = vlan_id / 160;
7214         vlan_offset_byte = (vlan_id % 160) / 8;
7215         vlan_offset_byte_val = 1 << (vlan_id % 8);
7216
7217         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7218         req->vlan_offset = vlan_offset_160;
7219         req->vlan_cfg = is_kill;
7220         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7221
7222         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7223         if (ret)
7224                 dev_err(&hdev->pdev->dev,
7225                         "port vlan command, send fail, ret =%d.\n", ret);
7226         return ret;
7227 }
7228
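     /* Program a vlan filter entry for @vport_id: update the VF vlan table
      * first, track membership in hdev->vlan_table, and only touch the port
      * vlan filter when the first vport joins or the last vport leaves the
      * vlan.
      */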
7229 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7230                                     u16 vport_id, u16 vlan_id, u8 qos,
7231                                     bool is_kill)
7232 {
7233         u16 vport_idx, vport_num = 0;
7234         int ret;
7235
7236         if (is_kill && !vlan_id)
7237                 return 0;
7238
7239         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7240                                        0, proto);
7241         if (ret) {
7242                 dev_err(&hdev->pdev->dev,
7243                         "Set %d vport vlan filter config fail, ret =%d.\n",
7244                         vport_id, ret);
7245                 return ret;
7246         }
7247
7248         /* vlan 0 may be added twice when 8021q module is enabled */
7249         if (!is_kill && !vlan_id &&
7250             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7251                 return 0;
7252
7253         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7254                 dev_err(&hdev->pdev->dev,
7255                         "Add port vlan failed, vport %d is already in vlan %d\n",
7256                         vport_id, vlan_id);
7257                 return -EINVAL;
7258         }
7259
7260         if (is_kill &&
7261             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7262                 dev_err(&hdev->pdev->dev,
7263                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7264                         vport_id, vlan_id);
7265                 return -EINVAL;
7266         }
7267
7268         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7269                 vport_num++;
7270
7271         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7272                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7273                                                  is_kill);
7274
7275         return ret;
7276 }
7277
7278 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7279 {
7280         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7281         struct hclge_vport_vtag_tx_cfg_cmd *req;
7282         struct hclge_dev *hdev = vport->back;
7283         struct hclge_desc desc;
7284         int status;
7285
7286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7287
7288         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7289         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7290         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7291         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7292                       vcfg->accept_tag1 ? 1 : 0);
7293         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7294                       vcfg->accept_untag1 ? 1 : 0);
7295         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7296                       vcfg->accept_tag2 ? 1 : 0);
7297         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7298                       vcfg->accept_untag2 ? 1 : 0);
7299         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7300                       vcfg->insert_tag1_en ? 1 : 0);
7301         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7302                       vcfg->insert_tag2_en ? 1 : 0);
7303         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7304
7305         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7306         req->vf_bitmap[req->vf_offset] =
7307                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7308
7309         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7310         if (status)
7311                 dev_err(&hdev->pdev->dev,
7312                         "Send port txvlan cfg command fail, ret =%d\n",
7313                         status);
7314
7315         return status;
7316 }
7317
7318 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7319 {
7320         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7321         struct hclge_vport_vtag_rx_cfg_cmd *req;
7322         struct hclge_dev *hdev = vport->back;
7323         struct hclge_desc desc;
7324         int status;
7325
7326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7327
7328         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7329         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7330                       vcfg->strip_tag1_en ? 1 : 0);
7331         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7332                       vcfg->strip_tag2_en ? 1 : 0);
7333         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7334                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7335         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7336                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7337
7338         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7339         req->vf_bitmap[req->vf_offset] =
7340                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7341
7342         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7343         if (status)
7344                 dev_err(&hdev->pdev->dev,
7345                         "Send port rxvlan cfg command fail, ret =%d\n",
7346                         status);
7347
7348         return status;
7349 }
7350
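     /* Configure TX and RX vlan tag offload for @vport according to the
      * port based vlan state: when enabled, @vlan_tag is inserted on TX as
      * tag1; when disabled, user tag1 is accepted unmodified. RX stripping
      * is derived from rx_vlan_offload_en.
      */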
7351 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7352                                   u16 port_base_vlan_state,
7353                                   u16 vlan_tag)
7354 {
7355         int ret;
7356
7357         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7358                 vport->txvlan_cfg.accept_tag1 = true;
7359                 vport->txvlan_cfg.insert_tag1_en = false;
7360                 vport->txvlan_cfg.default_tag1 = 0;
7361         } else {
7362                 vport->txvlan_cfg.accept_tag1 = false;
7363                 vport->txvlan_cfg.insert_tag1_en = true;
7364                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7365         }
7366
7367         vport->txvlan_cfg.accept_untag1 = true;
7368
7369         /* accept_tag2 and accept_untag2 are not supported on
7370          * pdev revision(0x20); newer revisions support them, but
7371          * these two fields cannot be configured by the user.
7372          */
7373         vport->txvlan_cfg.accept_tag2 = true;
7374         vport->txvlan_cfg.accept_untag2 = true;
7375         vport->txvlan_cfg.insert_tag2_en = false;
7376         vport->txvlan_cfg.default_tag2 = 0;
7377
7378         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7379                 vport->rxvlan_cfg.strip_tag1_en = false;
7380                 vport->rxvlan_cfg.strip_tag2_en =
7381                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7382         } else {
7383                 vport->rxvlan_cfg.strip_tag1_en =
7384                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7385                 vport->rxvlan_cfg.strip_tag2_en = true;
7386         }
7387         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7388         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7389
7390         ret = hclge_set_vlan_tx_offload_cfg(vport);
7391         if (ret)
7392                 return ret;
7393
7394         return hclge_set_vlan_rx_offload_cfg(vport);
7395 }
7396
7397 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7398 {
7399         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7400         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7401         struct hclge_desc desc;
7402         int status;
7403
7404         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7405         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7406         rx_req->ot_fst_vlan_type =
7407                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7408         rx_req->ot_sec_vlan_type =
7409                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7410         rx_req->in_fst_vlan_type =
7411                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7412         rx_req->in_sec_vlan_type =
7413                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7414
7415         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7416         if (status) {
7417                 dev_err(&hdev->pdev->dev,
7418                         "Send rxvlan protocol type command fail, ret =%d\n",
7419                         status);
7420                 return status;
7421         }
7422
7423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7424
7425         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7426         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7427         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7428
7429         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7430         if (status)
7431                 dev_err(&hdev->pdev->dev,
7432                         "Send txvlan protocol type command fail, ret =%d\n",
7433                         status);
7434
7435         return status;
7436 }
7437
7438 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7439 {
7440 #define HCLGE_DEF_VLAN_TYPE             0x8100
7441
7442         struct hnae3_handle *handle = &hdev->vport[0].nic;
7443         struct hclge_vport *vport;
7444         int ret;
7445         int i;
7446
7447         if (hdev->pdev->revision >= 0x21) {
7448                 /* for revision 0x21, vf vlan filter is per function */
7449                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7450                         vport = &hdev->vport[i];
7451                         ret = hclge_set_vlan_filter_ctrl(hdev,
7452                                                          HCLGE_FILTER_TYPE_VF,
7453                                                          HCLGE_FILTER_FE_EGRESS,
7454                                                          true,
7455                                                          vport->vport_id);
7456                         if (ret)
7457                                 return ret;
7458                 }
7459
7460                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7461                                                  HCLGE_FILTER_FE_INGRESS, true,
7462                                                  0);
7463                 if (ret)
7464                         return ret;
7465         } else {
7466                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7467                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7468                                                  true, 0);
7469                 if (ret)
7470                         return ret;
7471         }
7472
7473         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7474
7475         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7476         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7477         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7478         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7479         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7480         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7481
7482         ret = hclge_set_vlan_protocol_type(hdev);
7483         if (ret)
7484                 return ret;
7485
7486         for (i = 0; i < hdev->num_alloc_vport; i++) {
7487                 u16 vlan_tag;
7488
7489                 vport = &hdev->vport[i];
7490                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7491
7492                 ret = hclge_vlan_offload_cfg(vport,
7493                                              vport->port_base_vlan_cfg.state,
7494                                              vlan_tag);
7495                 if (ret)
7496                         return ret;
7497         }
7498
7499         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7500 }
7501
7502 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7503                                        bool writen_to_tbl)
7504 {
7505         struct hclge_vport_vlan_cfg *vlan;
7506
7507         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7508         if (!vlan)
7509                 return;
7510
7511         vlan->hd_tbl_status = writen_to_tbl;
7512         vlan->vlan_id = vlan_id;
7513
7514         list_add_tail(&vlan->node, &vport->vlan_list);
7515 }
7516
7517 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7518 {
7519         struct hclge_vport_vlan_cfg *vlan, *tmp;
7520         struct hclge_dev *hdev = vport->back;
7521         int ret;
7522
7523         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7524                 if (!vlan->hd_tbl_status) {
7525                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7526                                                        vport->vport_id,
7527                                                        vlan->vlan_id, 0, false);
7528                         if (ret) {
7529                                 dev_err(&hdev->pdev->dev,
7530                                         "restore vport vlan list failed, ret=%d\n",
7531                                         ret);
7532                                 return ret;
7533                         }
7534                 }
7535                 vlan->hd_tbl_status = true;
7536         }
7537
7538         return 0;
7539 }
7540
7541 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7542                                       bool is_write_tbl)
7543 {
7544         struct hclge_vport_vlan_cfg *vlan, *tmp;
7545         struct hclge_dev *hdev = vport->back;
7546
7547         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7548                 if (vlan->vlan_id == vlan_id) {
7549                         if (is_write_tbl && vlan->hd_tbl_status)
7550                                 hclge_set_vlan_filter_hw(hdev,
7551                                                          htons(ETH_P_8021Q),
7552                                                          vport->vport_id,
7553                                                          vlan_id, 0,
7554                                                          true);
7555
7556                         list_del(&vlan->node);
7557                         kfree(vlan);
7558                         break;
7559                 }
7560         }
7561 }
7562
7563 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7564 {
7565         struct hclge_vport_vlan_cfg *vlan, *tmp;
7566         struct hclge_dev *hdev = vport->back;
7567
7568         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7569                 if (vlan->hd_tbl_status)
7570                         hclge_set_vlan_filter_hw(hdev,
7571                                                  htons(ETH_P_8021Q),
7572                                                  vport->vport_id,
7573                                                  vlan->vlan_id, 0,
7574                                                  true);
7575
7576                 vlan->hd_tbl_status = false;
7577                 if (is_del_list) {
7578                         list_del(&vlan->node);
7579                         kfree(vlan);
7580                 }
7581         }
7582 }
7583
7584 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7585 {
7586         struct hclge_vport_vlan_cfg *vlan, *tmp;
7587         struct hclge_vport *vport;
7588         int i;
7589
7590         mutex_lock(&hdev->vport_cfg_mutex);
7591         for (i = 0; i < hdev->num_alloc_vport; i++) {
7592                 vport = &hdev->vport[i];
7593                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7594                         list_del(&vlan->node);
7595                         kfree(vlan);
7596                 }
7597         }
7598         mutex_unlock(&hdev->vport_cfg_mutex);
7599 }
7600
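     /* Rewrite the hardware vlan filter for every vport, typically after a
      * reset: vports with port based vlan enabled get that single tag
      * restored, the others get their vport vlan list replayed.
      */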
7601 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7602 {
7603         struct hclge_vport *vport = hclge_get_vport(handle);
7604         struct hclge_vport_vlan_cfg *vlan, *tmp;
7605         struct hclge_dev *hdev = vport->back;
7606         u16 vlan_proto, qos;
7607         u16 state, vlan_id;
7608         int i;
7609
7610         mutex_lock(&hdev->vport_cfg_mutex);
7611         for (i = 0; i < hdev->num_alloc_vport; i++) {
7612                 vport = &hdev->vport[i];
7613                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7614                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7615                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7616                 state = vport->port_base_vlan_cfg.state;
7617
7618                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7619                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7620                                                  vport->vport_id, vlan_id, qos,
7621                                                  false);
7622                         continue;
7623                 }
7624
7625                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7626                         if (vlan->hd_tbl_status)
7627                                 hclge_set_vlan_filter_hw(hdev,
7628                                                          htons(ETH_P_8021Q),
7629                                                          vport->vport_id,
7630                                                          vlan->vlan_id, 0,
7631                                                          false);
7632                 }
7633         }
7634
7635         mutex_unlock(&hdev->vport_cfg_mutex);
7636 }
7637
7638 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7639 {
7640         struct hclge_vport *vport = hclge_get_vport(handle);
7641
7642         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7643                 vport->rxvlan_cfg.strip_tag1_en = false;
7644                 vport->rxvlan_cfg.strip_tag2_en = enable;
7645         } else {
7646                 vport->rxvlan_cfg.strip_tag1_en = enable;
7647                 vport->rxvlan_cfg.strip_tag2_en = true;
7648         }
7649         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7650         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7651         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7652
7653         return hclge_set_vlan_rx_offload_cfg(vport);
7654 }
7655
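     /* Switch the hardware vlan filter between port based vlan and the
      * per-vport vlan list: when enabling port based vlan, drop the list
      * entries from hardware and install the new tag; when disabling,
      * remove the old tag and replay the whole vport vlan list.
      */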
7656 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7657                                             u16 port_base_vlan_state,
7658                                             struct hclge_vlan_info *new_info,
7659                                             struct hclge_vlan_info *old_info)
7660 {
7661         struct hclge_dev *hdev = vport->back;
7662         int ret;
7663
7664         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7665                 hclge_rm_vport_all_vlan_table(vport, false);
7666                 return hclge_set_vlan_filter_hw(hdev,
7667                                                  htons(new_info->vlan_proto),
7668                                                  vport->vport_id,
7669                                                  new_info->vlan_tag,
7670                                                  new_info->qos, false);
7671         }
7672
7673         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7674                                        vport->vport_id, old_info->vlan_tag,
7675                                        old_info->qos, true);
7676         if (ret)
7677                 return ret;
7678
7679         return hclge_add_vport_all_vlan_table(vport);
7680 }
7681
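     /* Apply a new port based vlan configuration to @vport: reprogram the
      * tag offload, update the hardware filter entries (or just swap tags
      * when only the vlan id changes), and record the new state and tag.
      */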
7682 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7683                                     struct hclge_vlan_info *vlan_info)
7684 {
7685         struct hnae3_handle *nic = &vport->nic;
7686         struct hclge_vlan_info *old_vlan_info;
7687         struct hclge_dev *hdev = vport->back;
7688         int ret;
7689
7690         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7691
7692         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7693         if (ret)
7694                 return ret;
7695
7696         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7697                 /* add new VLAN tag */
7698                 ret = hclge_set_vlan_filter_hw(hdev,
7699                                                htons(vlan_info->vlan_proto),
7700                                                vport->vport_id,
7701                                                vlan_info->vlan_tag,
7702                                                vlan_info->qos, false);
7703                 if (ret)
7704                         return ret;
7705
7706                 /* remove old VLAN tag */
7707                 ret = hclge_set_vlan_filter_hw(hdev,
7708                                                htons(old_vlan_info->vlan_proto),
7709                                                vport->vport_id,
7710                                                old_vlan_info->vlan_tag,
7711                                                old_vlan_info->qos, true);
7712                 if (ret)
7713                         return ret;
7714
7715                 goto update;
7716         }
7717
7718         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7719                                                old_vlan_info);
7720         if (ret)
7721                 return ret;
7722
7723         /* update the state only when disabling or enabling port based VLAN */
7724         vport->port_base_vlan_cfg.state = state;
7725         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7726                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7727         else
7728                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7729
7730 update:
7731         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7732         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7733         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7734
7735         return 0;
7736 }
7737
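     /* Work out the port based vlan transition implied by a VF vlan
      * request: no change, enable, disable or modify, depending on the
      * current state and whether the requested vlan differs from the one
      * already in use.
      */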
7738 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7739                                           enum hnae3_port_base_vlan_state state,
7740                                           u16 vlan)
7741 {
7742         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7743                 if (!vlan)
7744                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7745                 else
7746                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7747         } else {
7748                 if (!vlan)
7749                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7750                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7751                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7752                 else
7753                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7754         }
7755 }
7756
7757 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7758                                     u16 vlan, u8 qos, __be16 proto)
7759 {
7760         struct hclge_vport *vport = hclge_get_vport(handle);
7761         struct hclge_dev *hdev = vport->back;
7762         struct hclge_vlan_info vlan_info;
7763         u16 state;
7764         int ret;
7765
7766         if (hdev->pdev->revision == 0x20)
7767                 return -EOPNOTSUPP;
7768
7769         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7770         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7771                 return -EINVAL;
7772         if (proto != htons(ETH_P_8021Q))
7773                 return -EPROTONOSUPPORT;
7774
7775         vport = &hdev->vport[vfid];
7776         state = hclge_get_port_base_vlan_state(vport,
7777                                                vport->port_base_vlan_cfg.state,
7778                                                vlan);
7779         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7780                 return 0;
7781
7782         vlan_info.vlan_tag = vlan;
7783         vlan_info.qos = qos;
7784         vlan_info.vlan_proto = ntohs(proto);
7785
7786         /* update port based VLAN for PF */
7787         if (!vfid) {
7788                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7789                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7790                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7791
7792                 return ret;
7793         }
7794
7795         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7796                 return hclge_update_port_base_vlan_cfg(vport, state,
7797                                                        &vlan_info);
7798         } else {
7799                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7800                                                         (u8)vfid, state,
7801                                                         vlan, qos,
7802                                                         ntohs(proto));
7803                 return ret;
7804         }
7805 }
7806
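     /* Add or remove a vlan filter entry for the vport behind @handle. The
      * hardware table is only touched while port based vlan is disabled;
      * the vport vlan list is always kept up to date, and failed deletions
      * are recorded so hclge_sync_vlan_filter() can retry them later.
      */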
7807 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7808                           u16 vlan_id, bool is_kill)
7809 {
7810         struct hclge_vport *vport = hclge_get_vport(handle);
7811         struct hclge_dev *hdev = vport->back;
7812         bool writen_to_tbl = false;
7813         int ret = 0;
7814
7815         /* When the device is resetting, firmware is unable to handle
7816          * the mailbox. Just record the vlan id, and remove it after
7817          * the reset has finished.
7818          */
7819         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7820                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7821                 return -EBUSY;
7822         }
7823
7824         /* When port based vlan is enabled, we use the port based vlan as
7825          * the vlan filter entry. In this case, we don't update the vlan
7826          * filter table when the user adds or removes a vlan, we only update
7827          * the vport vlan list. The vlan ids in the vlan list are not written
7828          * to the vlan filter table until port based vlan is disabled.
7829          */
7830         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7831                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7832                                                vlan_id, 0, is_kill);
7833                 writen_to_tbl = true;
7834         }
7835
7836         if (!ret) {
7837                 if (is_kill)
7838                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7839                 else
7840                         hclge_add_vport_vlan_table(vport, vlan_id,
7841                                                    writen_to_tbl);
7842         } else if (is_kill) {
7843                 /* When removing the hw vlan filter fails, record the vlan id
7844                  * and try to remove it from hw later, to stay consistent
7845                  * with the stack.
7846                  */
7847                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7848         }
7849         return ret;
7850 }
7851
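     /* Retry vlan deletions that previously failed (recorded in each
      * vport's vlan_del_fail_bmap), bounded by HCLGE_MAX_SYNC_COUNT
      * entries per invocation.
      */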
7852 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7853 {
7854 #define HCLGE_MAX_SYNC_COUNT    60
7855
7856         int i, ret, sync_cnt = 0;
7857         u16 vlan_id;
7858
7859         /* start from vport 1 since the PF is always alive */
7860         for (i = 0; i < hdev->num_alloc_vport; i++) {
7861                 struct hclge_vport *vport = &hdev->vport[i];
7862
7863                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7864                                          VLAN_N_VID);
7865                 while (vlan_id != VLAN_N_VID) {
7866                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7867                                                        vport->vport_id, vlan_id,
7868                                                        0, true);
7869                         if (ret && ret != -EINVAL)
7870                                 return;
7871
7872                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7873                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7874
7875                         sync_cnt++;
7876                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7877                                 return;
7878
7879                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7880                                                  VLAN_N_VID);
7881                 }
7882         }
7883 }
7884
7885 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7886 {
7887         struct hclge_config_max_frm_size_cmd *req;
7888         struct hclge_desc desc;
7889
7890         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7891
7892         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7893         req->max_frm_size = cpu_to_le16(new_mps);
7894         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7895
7896         return hclge_cmd_send(&hdev->hw, &desc, 1);
7897 }
7898
7899 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7900 {
7901         struct hclge_vport *vport = hclge_get_vport(handle);
7902
7903         return hclge_set_vport_mtu(vport, new_mtu);
7904 }
7905
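     /* Change the maximum frame size of @vport. A VF only has to fit within
      * the PF's mps; changing the PF's mps additionally requires that no VF
      * exceeds the new value, and triggers a buffer reallocation while the
      * client is paused.
      */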
7906 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7907 {
7908         struct hclge_dev *hdev = vport->back;
7909         int i, max_frm_size, ret;
7910
7911         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7912         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7913             max_frm_size > HCLGE_MAC_MAX_FRAME)
7914                 return -EINVAL;
7915
7916         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7917         mutex_lock(&hdev->vport_lock);
7918         /* VF's mps must fit within hdev->mps */
7919         if (vport->vport_id && max_frm_size > hdev->mps) {
7920                 mutex_unlock(&hdev->vport_lock);
7921                 return -EINVAL;
7922         } else if (vport->vport_id) {
7923                 vport->mps = max_frm_size;
7924                 mutex_unlock(&hdev->vport_lock);
7925                 return 0;
7926         }
7927
7928         /* PF's mps must not be less than any VF's mps */
7929         for (i = 1; i < hdev->num_alloc_vport; i++)
7930                 if (max_frm_size < hdev->vport[i].mps) {
7931                         mutex_unlock(&hdev->vport_lock);
7932                         return -EINVAL;
7933                 }
7934
7935         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7936
7937         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7938         if (ret) {
7939                 dev_err(&hdev->pdev->dev,
7940                         "Change mtu fail, ret =%d\n", ret);
7941                 goto out;
7942         }
7943
7944         hdev->mps = max_frm_size;
7945         vport->mps = max_frm_size;
7946
7947         ret = hclge_buffer_alloc(hdev);
7948         if (ret)
7949                 dev_err(&hdev->pdev->dev,
7950                         "Allocate buffer fail, ret =%d\n", ret);
7951
7952 out:
7953         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7954         mutex_unlock(&hdev->vport_lock);
7955         return ret;
7956 }
7957
7958 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7959                                     bool enable)
7960 {
7961         struct hclge_reset_tqp_queue_cmd *req;
7962         struct hclge_desc desc;
7963         int ret;
7964
7965         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7966
7967         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7968         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7969         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7970
7971         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7972         if (ret) {
7973                 dev_err(&hdev->pdev->dev,
7974                         "Send tqp reset cmd error, status =%d\n", ret);
7975                 return ret;
7976         }
7977
7978         return 0;
7979 }
7980
7981 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7982 {
7983         struct hclge_reset_tqp_queue_cmd *req;
7984         struct hclge_desc desc;
7985         int ret;
7986
7987         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7988
7989         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7990         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7991
7992         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7993         if (ret) {
7994                 dev_err(&hdev->pdev->dev,
7995                         "Get reset status error, status =%d\n", ret);
7996                 return ret;
7997         }
7998
7999         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8000 }
8001
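     /* Convert a queue id that is local to @handle into the global TQP
      * index used in firmware commands.
      */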
8002 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8003 {
8004         struct hnae3_queue *queue;
8005         struct hclge_tqp *tqp;
8006
8007         queue = handle->kinfo.tqp[queue_id];
8008         tqp = container_of(queue, struct hclge_tqp, q);
8009
8010         return tqp->index;
8011 }
8012
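     /* Reset a single TQP: disable the queue, assert the reset through
      * firmware, poll the reset status until the hardware is ready, then
      * deassert the reset.
      */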
8013 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8014 {
8015         struct hclge_vport *vport = hclge_get_vport(handle);
8016         struct hclge_dev *hdev = vport->back;
8017         int reset_try_times = 0;
8018         int reset_status;
8019         u16 queue_gid;
8020         int ret;
8021
8022         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8023
8024         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8025         if (ret) {
8026                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8027                 return ret;
8028         }
8029
8030         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8031         if (ret) {
8032                 dev_err(&hdev->pdev->dev,
8033                         "Send reset tqp cmd fail, ret = %d\n", ret);
8034                 return ret;
8035         }
8036
8037         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8038                 /* Wait for tqp hw reset */
8039                 msleep(20);
8040                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8041                 if (reset_status)
8042                         break;
8043         }
8044
8045         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8046                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8047                 return -ETIME;
8048         }
8049
8050         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8051         if (ret)
8052                 dev_err(&hdev->pdev->dev,
8053                         "Deassert the soft reset fail, ret = %d\n", ret);
8054
8055         return ret;
8056 }
8057
8058 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8059 {
8060         struct hclge_dev *hdev = vport->back;
8061         int reset_try_times = 0;
8062         int reset_status;
8063         u16 queue_gid;
8064         int ret;
8065
8066         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8067
8068         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8069         if (ret) {
8070                 dev_warn(&hdev->pdev->dev,
8071                          "Send reset tqp cmd fail, ret = %d\n", ret);
8072                 return;
8073         }
8074
8075         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8076                 /* Wait for tqp hw reset */
8077                 msleep(20);
8078                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8079                 if (reset_status)
8080                         break;
8081         }
8082
8083         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8084                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8085                 return;
8086         }
8087
8088         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8089         if (ret)
8090                 dev_warn(&hdev->pdev->dev,
8091                          "Deassert the soft reset fail, ret = %d\n", ret);
8092 }
8093
8094 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8095 {
8096         struct hclge_vport *vport = hclge_get_vport(handle);
8097         struct hclge_dev *hdev = vport->back;
8098
8099         return hdev->fw_version;
8100 }
8101
8102 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8103 {
8104         struct phy_device *phydev = hdev->hw.mac.phydev;
8105
8106         if (!phydev)
8107                 return;
8108
8109         phy_set_asym_pause(phydev, rx_en, tx_en);
8110 }
8111
8112 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8113 {
8114         int ret;
8115
8116         if (rx_en && tx_en)
8117                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8118         else if (rx_en && !tx_en)
8119                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8120         else if (!rx_en && tx_en)
8121                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8122         else
8123                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8124
8125         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8126                 return 0;
8127
8128         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8129         if (ret) {
8130                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8131                         ret);
8132                 return ret;
8133         }
8134
8135         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8136
8137         return 0;
8138 }
8139
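     /* Re-resolve the pause configuration from the PHY autoneg result
      * (local and link partner advertisements) and apply it to the MAC.
      */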
8140 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8141 {
8142         struct phy_device *phydev = hdev->hw.mac.phydev;
8143         u16 remote_advertising = 0;
8144         u16 local_advertising;
8145         u32 rx_pause, tx_pause;
8146         u8 flowctl;
8147
8148         if (!phydev->link || !phydev->autoneg)
8149                 return 0;
8150
8151         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8152
8153         if (phydev->pause)
8154                 remote_advertising = LPA_PAUSE_CAP;
8155
8156         if (phydev->asym_pause)
8157                 remote_advertising |= LPA_PAUSE_ASYM;
8158
8159         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8160                                            remote_advertising);
8161         tx_pause = flowctl & FLOW_CTRL_TX;
8162         rx_pause = flowctl & FLOW_CTRL_RX;
8163
8164         if (phydev->duplex == HCLGE_MAC_HALF) {
8165                 tx_pause = 0;
8166                 rx_pause = 0;
8167         }
8168
8169         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8170 }
8171
8172 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8173                                  u32 *rx_en, u32 *tx_en)
8174 {
8175         struct hclge_vport *vport = hclge_get_vport(handle);
8176         struct hclge_dev *hdev = vport->back;
8177
8178         *auto_neg = hclge_get_autoneg(handle);
8179
8180         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8181                 *rx_en = 0;
8182                 *tx_en = 0;
8183                 return;
8184         }
8185
8186         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8187                 *rx_en = 1;
8188                 *tx_en = 0;
8189         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8190                 *tx_en = 1;
8191                 *rx_en = 0;
8192         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8193                 *rx_en = 1;
8194                 *tx_en = 1;
8195         } else {
8196                 *rx_en = 0;
8197                 *tx_en = 0;
8198         }
8199 }
8200
8201 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8202                                 u32 rx_en, u32 tx_en)
8203 {
8204         struct hclge_vport *vport = hclge_get_vport(handle);
8205         struct hclge_dev *hdev = vport->back;
8206         struct phy_device *phydev = hdev->hw.mac.phydev;
8207         u32 fc_autoneg;
8208
8209         fc_autoneg = hclge_get_autoneg(handle);
8210         if (auto_neg != fc_autoneg) {
8211                 dev_info(&hdev->pdev->dev,
8212                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8213                 return -EOPNOTSUPP;
8214         }
8215
8216         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8217                 dev_info(&hdev->pdev->dev,
8218                          "Priority flow control enabled. Cannot set link flow control.\n");
8219                 return -EOPNOTSUPP;
8220         }
8221
8222         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8223
8224         if (!fc_autoneg)
8225                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8226
8227         if (phydev)
8228                 return phy_start_aneg(phydev);
8229
8230         if (hdev->pdev->revision == 0x20)
8231                 return -EOPNOTSUPP;
8232
8233         return hclge_restart_autoneg(handle);
8234 }
8235
8236 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8237                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8238 {
8239         struct hclge_vport *vport = hclge_get_vport(handle);
8240         struct hclge_dev *hdev = vport->back;
8241
8242         if (speed)
8243                 *speed = hdev->hw.mac.speed;
8244         if (duplex)
8245                 *duplex = hdev->hw.mac.duplex;
8246         if (auto_neg)
8247                 *auto_neg = hdev->hw.mac.autoneg;
8248 }
8249
8250 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8251                                  u8 *module_type)
8252 {
8253         struct hclge_vport *vport = hclge_get_vport(handle);
8254         struct hclge_dev *hdev = vport->back;
8255
8256         if (media_type)
8257                 *media_type = hdev->hw.mac.media_type;
8258
8259         if (module_type)
8260                 *module_type = hdev->hw.mac.module_type;
8261 }
8262
8263 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8264                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8265 {
8266         struct hclge_vport *vport = hclge_get_vport(handle);
8267         struct hclge_dev *hdev = vport->back;
8268         struct phy_device *phydev = hdev->hw.mac.phydev;
8269         int mdix_ctrl, mdix, is_resolved;
8270         unsigned int retval;
8271
8272         if (!phydev) {
8273                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8274                 *tp_mdix = ETH_TP_MDI_INVALID;
8275                 return;
8276         }
8277
8278         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8279
8280         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8281         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8282                                     HCLGE_PHY_MDIX_CTRL_S);
8283
8284         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8285         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8286         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8287
8288         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8289
8290         switch (mdix_ctrl) {
8291         case 0x0:
8292                 *tp_mdix_ctrl = ETH_TP_MDI;
8293                 break;
8294         case 0x1:
8295                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8296                 break;
8297         case 0x3:
8298                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8299                 break;
8300         default:
8301                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8302                 break;
8303         }
8304
8305         if (!is_resolved)
8306                 *tp_mdix = ETH_TP_MDI_INVALID;
8307         else if (mdix)
8308                 *tp_mdix = ETH_TP_MDI_X;
8309         else
8310                 *tp_mdix = ETH_TP_MDI;
8311 }
8312
8313 static void hclge_info_show(struct hclge_dev *hdev)
8314 {
8315         struct device *dev = &hdev->pdev->dev;
8316
8317         dev_info(dev, "PF info begin:\n");
8318
8319         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8320         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8321         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8322         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8323         dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8324         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8325         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8326         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8327         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8328         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8329         dev_info(dev, "This is %s PF\n",
8330                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8331         dev_info(dev, "DCB %s\n",
8332                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8333         dev_info(dev, "MQPRIO %s\n",
8334                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8335
8336         dev_info(dev, "PF info end.\n");
8337 }
8338
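/* Bind the NIC (KNIC) client to a vport. The reset counter is sampled
 * before init_instance() so that, if a reset is in progress or has
 * completed by the time initialization finishes, the freshly created
 * instance is torn down again and -EBUSY is returned. On success, NIC
 * hardware error interrupts are enabled and the PF configuration is
 * printed when driver messages are enabled.
 */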
8339 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8340                                           struct hclge_vport *vport)
8341 {
8342         struct hnae3_client *client = vport->nic.client;
8343         struct hclge_dev *hdev = ae_dev->priv;
8344         int rst_cnt;
8345         int ret;
8346
8347         rst_cnt = hdev->rst_stats.reset_cnt;
8348         ret = client->ops->init_instance(&vport->nic);
8349         if (ret)
8350                 return ret;
8351
8352         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8353         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8354             rst_cnt != hdev->rst_stats.reset_cnt) {
8355                 ret = -EBUSY;
8356                 goto init_nic_err;
8357         }
8358
8359         /* Enable nic hw error interrupts */
8360         ret = hclge_config_nic_hw_error(hdev, true);
8361         if (ret) {
8362                 dev_err(&ae_dev->pdev->dev,
8363                         "fail(%d) to enable hw error interrupts\n", ret);
8364                 goto init_nic_err;
8365         }
8366
8367         hnae3_set_client_init_flag(client, ae_dev, 1);
8368
8369         if (netif_msg_drv(&hdev->vport->nic))
8370                 hclge_info_show(hdev);
8371
8372         return ret;
8373
8374 init_nic_err:
8375         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8376         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8377                 msleep(HCLGE_WAIT_RESET_DONE);
8378
8379         client->ops->uninit_instance(&vport->nic, 0);
8380
8381         return ret;
8382 }
8383
8384 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8385                                            struct hclge_vport *vport)
8386 {
8387         struct hnae3_client *client = vport->roce.client;
8388         struct hclge_dev *hdev = ae_dev->priv;
8389         int rst_cnt;
8390         int ret;
8391
8392         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8393             !hdev->nic_client)
8394                 return 0;
8395
8396         client = hdev->roce_client;
8397         ret = hclge_init_roce_base_info(vport);
8398         if (ret)
8399                 return ret;
8400
8401         rst_cnt = hdev->rst_stats.reset_cnt;
8402         ret = client->ops->init_instance(&vport->roce);
8403         if (ret)
8404                 return ret;
8405
8406         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8407         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8408             rst_cnt != hdev->rst_stats.reset_cnt) {
8409                 ret = -EBUSY;
8410                 goto init_roce_err;
8411         }
8412
8413         /* Enable roce ras interrupts */
8414         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8415         if (ret) {
8416                 dev_err(&ae_dev->pdev->dev,
8417                         "fail(%d) to enable roce ras interrupts\n", ret);
8418                 goto init_roce_err;
8419         }
8420
8421         hnae3_set_client_init_flag(client, ae_dev, 1);
8422
8423         return 0;
8424
8425 init_roce_err:
8426         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8427         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8428                 msleep(HCLGE_WAIT_RESET_DONE);
8429
8430         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8431
8432         return ret;
8433 }
8434
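/* Register a NIC or RoCE client on every vport of this PF (the PF
 * itself plus any VMDq vports). A KNIC client also pulls in the RoCE
 * instance when a RoCE client has already been registered; a RoCE
 * client on its own is only initialized once both clients are present
 * and the hardware supports RoCE.
 */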
8435 static int hclge_init_client_instance(struct hnae3_client *client,
8436                                       struct hnae3_ae_dev *ae_dev)
8437 {
8438         struct hclge_dev *hdev = ae_dev->priv;
8439         struct hclge_vport *vport;
8440         int i, ret;
8441
8442         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8443                 vport = &hdev->vport[i];
8444
8445                 switch (client->type) {
8446                 case HNAE3_CLIENT_KNIC:
8447
8448                         hdev->nic_client = client;
8449                         vport->nic.client = client;
8450                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8451                         if (ret)
8452                                 goto clear_nic;
8453
8454                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8455                         if (ret)
8456                                 goto clear_roce;
8457
8458                         break;
8459                 case HNAE3_CLIENT_ROCE:
8460                         if (hnae3_dev_roce_supported(hdev)) {
8461                                 hdev->roce_client = client;
8462                                 vport->roce.client = client;
8463                         }
8464
8465                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8466                         if (ret)
8467                                 goto clear_roce;
8468
8469                         break;
8470                 default:
8471                         return -EINVAL;
8472                 }
8473         }
8474
8475         return ret;
8476
8477 clear_nic:
8478         hdev->nic_client = NULL;
8479         vport->nic.client = NULL;
8480         return ret;
8481 clear_roce:
8482         hdev->roce_client = NULL;
8483         vport->roce.client = NULL;
8484         return ret;
8485 }
8486
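/* Tear down client instances on every vport. The RoCE instance is
 * removed first; if the caller is the RoCE client the NIC instance is
 * left untouched. Both paths wait for any reset in progress to finish
 * before calling back into the client's uninit_instance().
 */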
8487 static void hclge_uninit_client_instance(struct hnae3_client *client,
8488                                          struct hnae3_ae_dev *ae_dev)
8489 {
8490         struct hclge_dev *hdev = ae_dev->priv;
8491         struct hclge_vport *vport;
8492         int i;
8493
8494         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8495                 vport = &hdev->vport[i];
8496                 if (hdev->roce_client) {
8497                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8498                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8499                                 msleep(HCLGE_WAIT_RESET_DONE);
8500
8501                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8502                                                                 0);
8503                         hdev->roce_client = NULL;
8504                         vport->roce.client = NULL;
8505                 }
8506                 if (client->type == HNAE3_CLIENT_ROCE)
8507                         return;
8508                 if (hdev->nic_client && client->ops->uninit_instance) {
8509                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8510                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8511                                 msleep(HCLGE_WAIT_RESET_DONE);
8512
8513                         client->ops->uninit_instance(&vport->nic, 0);
8514                         hdev->nic_client = NULL;
8515                         vport->nic.client = NULL;
8516                 }
8517         }
8518 }
8519
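/* Enable the PCI device, set a 64-bit DMA mask (falling back to 32-bit
 * if that fails), request the regions, map BAR2 as the register space
 * (hw->io_base) and read how many VFs this function can support from
 * the SR-IOV capability.
 */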
8520 static int hclge_pci_init(struct hclge_dev *hdev)
8521 {
8522         struct pci_dev *pdev = hdev->pdev;
8523         struct hclge_hw *hw;
8524         int ret;
8525
8526         ret = pci_enable_device(pdev);
8527         if (ret) {
8528                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8529                 return ret;
8530         }
8531
8532         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8533         if (ret) {
8534                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8535                 if (ret) {
8536                         dev_err(&pdev->dev,
8537                                 "can't set consistent PCI DMA");
8538                         goto err_disable_device;
8539                 }
8540                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8541         }
8542
8543         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8544         if (ret) {
8545                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8546                 goto err_disable_device;
8547         }
8548
8549         pci_set_master(pdev);
8550         hw = &hdev->hw;
8551         hw->io_base = pcim_iomap(pdev, 2, 0);
8552         if (!hw->io_base) {
8553                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8554                 ret = -ENOMEM;
8555                 goto err_clr_master;
8556         }
8557
8558         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8559
8560         return 0;
8561 err_clr_master:
8562         pci_clear_master(pdev);
8563         pci_release_regions(pdev);
8564 err_disable_device:
8565         pci_disable_device(pdev);
8566
8567         return ret;
8568 }
8569
8570 static void hclge_pci_uninit(struct hclge_dev *hdev)
8571 {
8572         struct pci_dev *pdev = hdev->pdev;
8573
8574         pcim_iounmap(pdev, hdev->hw.io_base);
8575         pci_free_irq_vectors(pdev);
8576         pci_clear_master(pdev);
8577         pci_release_mem_regions(pdev);
8578         pci_disable_device(pdev);
8579 }
8580
8581 static void hclge_state_init(struct hclge_dev *hdev)
8582 {
8583         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8584         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8585         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8586         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8587         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8588         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8589 }
8590
8591 static void hclge_state_uninit(struct hclge_dev *hdev)
8592 {
8593         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8594         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8595
8596         if (hdev->service_timer.function)
8597                 del_timer_sync(&hdev->service_timer);
8598         if (hdev->reset_timer.function)
8599                 del_timer_sync(&hdev->reset_timer);
8600         if (hdev->service_task.func)
8601                 cancel_work_sync(&hdev->service_task);
8602         if (hdev->rst_service_task.func)
8603                 cancel_work_sync(&hdev->rst_service_task);
8604         if (hdev->mbx_service_task.func)
8605                 cancel_work_sync(&hdev->mbx_service_task);
8606 }
8607
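/* Called before an FLR is issued on the PF. An FLR-type reset is
 * requested through the normal reset path and the function then polls,
 * up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds), for the
 * reset task to bring the function down; a timeout is only logged and
 * the FLR proceeds regardless.
 */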
8608 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8609 {
8610 #define HCLGE_FLR_WAIT_MS       100
8611 #define HCLGE_FLR_WAIT_CNT      50
8612         struct hclge_dev *hdev = ae_dev->priv;
8613         int cnt = 0;
8614
8615         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8616         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8617         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8618         hclge_reset_event(hdev->pdev, NULL);
8619
8620         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8621                cnt++ < HCLGE_FLR_WAIT_CNT)
8622                 msleep(HCLGE_FLR_WAIT_MS);
8623
8624         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8625                 dev_err(&hdev->pdev->dev,
8626                         "flr wait down timeout: %d\n", cnt);
8627 }
8628
8629 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8630 {
8631         struct hclge_dev *hdev = ae_dev->priv;
8632
8633         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8634 }
8635
8636 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8637 {
8638         u16 i;
8639
8640         for (i = 0; i < hdev->num_alloc_vport; i++) {
8641                 struct hclge_vport *vport = &hdev->vport[i];
8642                 int ret;
8643
8644                 /* Send cmd to clear VF's FUNC_RST_ING */
8645                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8646                 if (ret)
8647                         dev_warn(&hdev->pdev->dev,
8648                                  "clear vf(%d) rst failed %d!\n",
8649                                  vport->vport_id, ret);
8650         }
8651 }
8652
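/* Main PF initialization, run when the ae_dev is probed. It allocates
 * the hclge_dev, brings up PCI and the firmware command queue, queries
 * capabilities, sets up MSI/MSI-X and the misc vector, allocates TQPs
 * and vports, and then configures the MAC, the MDIO bus (copper ports
 * only), UMV space, TSO/GRO, VLAN, TM scheduling, RSS, the manager
 * table and the flow director. Service timers and tasks are set up,
 * hardware errors that occurred earlier are logged (scheduling a
 * delayed reset if recovery needs one) and the misc vector is enabled.
 */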
8653 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8654 {
8655         struct pci_dev *pdev = ae_dev->pdev;
8656         struct hclge_dev *hdev;
8657         int ret;
8658
8659         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8660         if (!hdev) {
8661                 ret = -ENOMEM;
8662                 goto out;
8663         }
8664
8665         hdev->pdev = pdev;
8666         hdev->ae_dev = ae_dev;
8667         hdev->reset_type = HNAE3_NONE_RESET;
8668         hdev->reset_level = HNAE3_FUNC_RESET;
8669         ae_dev->priv = hdev;
8670         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8671
8672         mutex_init(&hdev->vport_lock);
8673         mutex_init(&hdev->vport_cfg_mutex);
8674         spin_lock_init(&hdev->fd_rule_lock);
8675
8676         ret = hclge_pci_init(hdev);
8677         if (ret) {
8678                 dev_err(&pdev->dev, "PCI init failed\n");
8679                 goto out;
8680         }
8681
8682         /* Firmware command queue initialization */
8683         ret = hclge_cmd_queue_init(hdev);
8684         if (ret) {
8685                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8686                 goto err_pci_uninit;
8687         }
8688
8689         /* Firmware command initialization */
8690         ret = hclge_cmd_init(hdev);
8691         if (ret)
8692                 goto err_cmd_uninit;
8693
8694         ret = hclge_get_cap(hdev);
8695         if (ret) {
8696                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8697                         ret);
8698                 goto err_cmd_uninit;
8699         }
8700
8701         ret = hclge_configure(hdev);
8702         if (ret) {
8703                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8704                 goto err_cmd_uninit;
8705         }
8706
8707         ret = hclge_init_msi(hdev);
8708         if (ret) {
8709                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8710                 goto err_cmd_uninit;
8711         }
8712
8713         ret = hclge_misc_irq_init(hdev);
8714         if (ret) {
8715                 dev_err(&pdev->dev,
8716                         "Misc IRQ(vector0) init error, ret = %d.\n",
8717                         ret);
8718                 goto err_msi_uninit;
8719         }
8720
8721         ret = hclge_alloc_tqps(hdev);
8722         if (ret) {
8723                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8724                 goto err_msi_irq_uninit;
8725         }
8726
8727         ret = hclge_alloc_vport(hdev);
8728         if (ret) {
8729                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8730                 goto err_msi_irq_uninit;
8731         }
8732
8733         ret = hclge_map_tqp(hdev);
8734         if (ret) {
8735                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8736                 goto err_msi_irq_uninit;
8737         }
8738
8739         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8740                 ret = hclge_mac_mdio_config(hdev);
8741                 if (ret) {
8742                         dev_err(&hdev->pdev->dev,
8743                                 "mdio config fail ret=%d\n", ret);
8744                         goto err_msi_irq_uninit;
8745                 }
8746         }
8747
8748         ret = hclge_init_umv_space(hdev);
8749         if (ret) {
8750                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8751                 goto err_mdiobus_unreg;
8752         }
8753
8754         ret = hclge_mac_init(hdev);
8755         if (ret) {
8756                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8757                 goto err_mdiobus_unreg;
8758         }
8759
8760         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8761         if (ret) {
8762                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8763                 goto err_mdiobus_unreg;
8764         }
8765
8766         ret = hclge_config_gro(hdev, true);
8767         if (ret)
8768                 goto err_mdiobus_unreg;
8769
8770         ret = hclge_init_vlan_config(hdev);
8771         if (ret) {
8772                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8773                 goto err_mdiobus_unreg;
8774         }
8775
8776         ret = hclge_tm_schd_init(hdev);
8777         if (ret) {
8778                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8779                 goto err_mdiobus_unreg;
8780         }
8781
8782         hclge_rss_init_cfg(hdev);
8783         ret = hclge_rss_init_hw(hdev);
8784         if (ret) {
8785                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8786                 goto err_mdiobus_unreg;
8787         }
8788
8789         ret = init_mgr_tbl(hdev);
8790         if (ret) {
8791                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8792                 goto err_mdiobus_unreg;
8793         }
8794
8795         ret = hclge_init_fd_config(hdev);
8796         if (ret) {
8797                 dev_err(&pdev->dev,
8798                         "fd table init fail, ret=%d\n", ret);
8799                 goto err_mdiobus_unreg;
8800         }
8801
8802         INIT_KFIFO(hdev->mac_tnl_log);
8803
8804         hclge_dcb_ops_set(hdev);
8805
8806         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8807         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8808         INIT_WORK(&hdev->service_task, hclge_service_task);
8809         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8810         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8811
8812         hclge_clear_all_event_cause(hdev);
8813         hclge_clear_resetting_state(hdev);
8814
8815         /* Log and clear the hw errors which already occurred */
8816         hclge_handle_all_hns_hw_errors(ae_dev);
8817
8818         /* request delayed reset for error recovery because an immediate
8819          * global reset on a PF affects pending initialization of other PFs
8820          */
8821         if (ae_dev->hw_err_reset_req) {
8822                 enum hnae3_reset_type reset_level;
8823
8824                 reset_level = hclge_get_reset_level(ae_dev,
8825                                                     &ae_dev->hw_err_reset_req);
8826                 hclge_set_def_reset_request(ae_dev, reset_level);
8827                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8828         }
8829
8830         /* Enable MISC vector(vector0) */
8831         hclge_enable_vector(&hdev->misc_vector, true);
8832
8833         hclge_state_init(hdev);
8834         hdev->last_reset_time = jiffies;
8835
8836         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8837         return 0;
8838
8839 err_mdiobus_unreg:
8840         if (hdev->hw.mac.phydev)
8841                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8842 err_msi_irq_uninit:
8843         hclge_misc_irq_uninit(hdev);
8844 err_msi_uninit:
8845         pci_free_irq_vectors(pdev);
8846 err_cmd_uninit:
8847         hclge_cmd_uninit(hdev);
8848 err_pci_uninit:
8849         pcim_iounmap(pdev, hdev->hw.io_base);
8850         pci_clear_master(pdev);
8851         pci_release_regions(pdev);
8852         pci_disable_device(pdev);
8853 out:
8854         return ret;
8855 }
8856
8857 static void hclge_stats_clear(struct hclge_dev *hdev)
8858 {
8859         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8860 }
8861
8862 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8863 {
8864         struct hclge_vport *vport = hdev->vport;
8865         int i;
8866
8867         for (i = 0; i < hdev->num_alloc_vport; i++) {
8868                 hclge_vport_stop(vport);
8869                 vport++;
8870         }
8871 }
8872
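/* Re-initialize the PF after a reset. Statistics and the software VLAN
 * tables are cleared, the command queue is brought back up, and the
 * MAC, TSO/GRO, VLAN, TM, RSS and flow director configuration is
 * restored. Hardware error interrupts (plus RoCE RAS interrupts when a
 * RoCE client is registered) are re-enabled because a global reset
 * disables them, and every vport is put back into the stopped state.
 */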
8873 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8874 {
8875         struct hclge_dev *hdev = ae_dev->priv;
8876         struct pci_dev *pdev = ae_dev->pdev;
8877         int ret;
8878
8879         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8880
8881         hclge_stats_clear(hdev);
8882         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8883         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8884
8885         ret = hclge_cmd_init(hdev);
8886         if (ret) {
8887                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8888                 return ret;
8889         }
8890
8891         ret = hclge_map_tqp(hdev);
8892         if (ret) {
8893                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8894                 return ret;
8895         }
8896
8897         hclge_reset_umv_space(hdev);
8898
8899         ret = hclge_mac_init(hdev);
8900         if (ret) {
8901                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8902                 return ret;
8903         }
8904
8905         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8906         if (ret) {
8907                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8908                 return ret;
8909         }
8910
8911         ret = hclge_config_gro(hdev, true);
8912         if (ret)
8913                 return ret;
8914
8915         ret = hclge_init_vlan_config(hdev);
8916         if (ret) {
8917                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8918                 return ret;
8919         }
8920
8921         ret = hclge_tm_init_hw(hdev, true);
8922         if (ret) {
8923                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8924                 return ret;
8925         }
8926
8927         ret = hclge_rss_init_hw(hdev);
8928         if (ret) {
8929                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8930                 return ret;
8931         }
8932
8933         ret = hclge_init_fd_config(hdev);
8934         if (ret) {
8935                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8936                 return ret;
8937         }
8938
8939         /* Re-enable the hw error interrupts because
8940          * the interrupts get disabled on global reset.
8941          */
8942         ret = hclge_config_nic_hw_error(hdev, true);
8943         if (ret) {
8944                 dev_err(&pdev->dev,
8945                         "fail(%d) to re-enable NIC hw error interrupts\n",
8946                         ret);
8947                 return ret;
8948         }
8949
8950         if (hdev->roce_client) {
8951                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8952                 if (ret) {
8953                         dev_err(&pdev->dev,
8954                                 "fail(%d) to re-enable roce ras interrupts\n",
8955                                 ret);
8956                         return ret;
8957                 }
8958         }
8959
8960         hclge_reset_vport_state(hdev);
8961
8962         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8963                  HCLGE_DRIVER_NAME);
8964
8965         return 0;
8966 }
8967
8968 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8969 {
8970         struct hclge_dev *hdev = ae_dev->priv;
8971         struct hclge_mac *mac = &hdev->hw.mac;
8972
8973         hclge_state_uninit(hdev);
8974
8975         if (mac->phydev)
8976                 mdiobus_unregister(mac->mdio_bus);
8977
8978         hclge_uninit_umv_space(hdev);
8979
8980         /* Disable MISC vector(vector0) */
8981         hclge_enable_vector(&hdev->misc_vector, false);
8982         synchronize_irq(hdev->misc_vector.vector_irq);
8983
8984         /* Disable all hw interrupts */
8985         hclge_config_mac_tnl_int(hdev, false);
8986         hclge_config_nic_hw_error(hdev, false);
8987         hclge_config_rocee_ras_interrupt(hdev, false);
8988
8989         hclge_cmd_uninit(hdev);
8990         hclge_misc_irq_uninit(hdev);
8991         hclge_pci_uninit(hdev);
8992         mutex_destroy(&hdev->vport_lock);
8993         hclge_uninit_vport_mac_table(hdev);
8994         hclge_uninit_vport_vlan_table(hdev);
8995         mutex_destroy(&hdev->vport_cfg_mutex);
8996         ae_dev->priv = NULL;
8997 }
8998
8999 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9000 {
9001         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9002         struct hclge_vport *vport = hclge_get_vport(handle);
9003         struct hclge_dev *hdev = vport->back;
9004
9005         return min_t(u32, hdev->rss_size_max,
9006                      vport->alloc_tqps / kinfo->num_tc);
9007 }
9008
9009 static void hclge_get_channels(struct hnae3_handle *handle,
9010                                struct ethtool_channels *ch)
9011 {
9012         ch->max_combined = hclge_get_max_channels(handle);
9013         ch->other_count = 1;
9014         ch->max_other = 1;
9015         ch->combined_count = handle->kinfo.rss_size;
9016 }
9017
9018 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9019                                         u16 *alloc_tqps, u16 *max_rss_size)
9020 {
9021         struct hclge_vport *vport = hclge_get_vport(handle);
9022         struct hclge_dev *hdev = vport->back;
9023
9024         *alloc_tqps = vport->alloc_tqps;
9025         *max_rss_size = hdev->rss_size_max;
9026 }
9027
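/* Change the number of queue pairs used for RSS (the ethtool channels
 * interface). The requested size is stored in kinfo->req_rss_size and
 * the TM vport mapping is updated; the RSS TC mode is then reprogrammed
 * with tc_size = ilog2(roundup_pow_of_two(rss_size)) and
 * tc_offset = rss_size * tc for each enabled TC. Unless the user has
 * installed an RSS indirection table of their own (rxfh_configured),
 * the table is rebuilt as a simple round-robin over the new rss_size.
 */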
9028 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9029                               bool rxfh_configured)
9030 {
9031         struct hclge_vport *vport = hclge_get_vport(handle);
9032         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9033         struct hclge_dev *hdev = vport->back;
9034         int cur_rss_size = kinfo->rss_size;
9035         int cur_tqps = kinfo->num_tqps;
9036         u16 tc_offset[HCLGE_MAX_TC_NUM];
9037         u16 tc_valid[HCLGE_MAX_TC_NUM];
9038         u16 tc_size[HCLGE_MAX_TC_NUM];
9039         u16 roundup_size;
9040         u32 *rss_indir;
9041         unsigned int i;
9042         int ret;
9043
9044         kinfo->req_rss_size = new_tqps_num;
9045
9046         ret = hclge_tm_vport_map_update(hdev);
9047         if (ret) {
9048                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9049                 return ret;
9050         }
9051
9052         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9053         roundup_size = ilog2(roundup_size);
9054         /* Set the RSS TC mode according to the new RSS size */
9055         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9056                 tc_valid[i] = 0;
9057
9058                 if (!(hdev->hw_tc_map & BIT(i)))
9059                         continue;
9060
9061                 tc_valid[i] = 1;
9062                 tc_size[i] = roundup_size;
9063                 tc_offset[i] = kinfo->rss_size * i;
9064         }
9065         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9066         if (ret)
9067                 return ret;
9068
9069         /* RSS indirection table has been configured by user */
9070         if (rxfh_configured)
9071                 goto out;
9072
9073         /* Reinitialize the RSS indirection table according to the new RSS size */
9074         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9075         if (!rss_indir)
9076                 return -ENOMEM;
9077
9078         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9079                 rss_indir[i] = i % kinfo->rss_size;
9080
9081         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9082         if (ret)
9083                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9084                         ret);
9085
9086         kfree(rss_indir);
9087
9088 out:
9089         if (!ret)
9090                 dev_info(&hdev->pdev->dev,
9091                          "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9092                          cur_rss_size, kinfo->rss_size,
9093                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9094
9095         return ret;
9096 }
9097
9098 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9099                               u32 *regs_num_64_bit)
9100 {
9101         struct hclge_desc desc;
9102         u32 total_num;
9103         int ret;
9104
9105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9106         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9107         if (ret) {
9108                 dev_err(&hdev->pdev->dev,
9109                         "Query register number cmd failed, ret = %d.\n", ret);
9110                 return ret;
9111         }
9112
9113         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9114         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9115
9116         total_num = *regs_num_32_bit + *regs_num_64_bit;
9117         if (!total_num)
9118                 return -EINVAL;
9119
9120         return 0;
9121 }
9122
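/* Read regs_num 32-bit register values from firmware. Descriptors are
 * chained so that each carries HCLGE_32_BIT_REG_RTN_DATANUM (8) u32
 * words, except the first, which loses HCLGE_32_BIT_DESC_NODATA_LEN (2)
 * words to the command header; hence cmd_num is
 * DIV_ROUND_UP(regs_num + 2, 8). Values are converted to CPU byte order
 * as they are copied into the caller's buffer.
 */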
9123 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9124                                  void *data)
9125 {
9126 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9127 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9128
9129         struct hclge_desc *desc;
9130         u32 *reg_val = data;
9131         __le32 *desc_data;
9132         int nodata_num;
9133         int cmd_num;
9134         int i, k, n;
9135         int ret;
9136
9137         if (regs_num == 0)
9138                 return 0;
9139
9140         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9141         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9142                                HCLGE_32_BIT_REG_RTN_DATANUM);
9143         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9144         if (!desc)
9145                 return -ENOMEM;
9146
9147         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9148         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9149         if (ret) {
9150                 dev_err(&hdev->pdev->dev,
9151                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
9152                 kfree(desc);
9153                 return ret;
9154         }
9155
9156         for (i = 0; i < cmd_num; i++) {
9157                 if (i == 0) {
9158                         desc_data = (__le32 *)(&desc[i].data[0]);
9159                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9160                 } else {
9161                         desc_data = (__le32 *)(&desc[i]);
9162                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
9163                 }
9164                 for (k = 0; k < n; k++) {
9165                         *reg_val++ = le32_to_cpu(*desc_data++);
9166
9167                         regs_num--;
9168                         if (!regs_num)
9169                                 break;
9170                 }
9171         }
9172
9173         kfree(desc);
9174         return 0;
9175 }
9176
9177 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9178                                  void *data)
9179 {
9180 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9181 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9182
9183         struct hclge_desc *desc;
9184         u64 *reg_val = data;
9185         __le64 *desc_data;
9186         int nodata_len;
9187         int cmd_num;
9188         int i, k, n;
9189         int ret;
9190
9191         if (regs_num == 0)
9192                 return 0;
9193
9194         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9195         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9196                                HCLGE_64_BIT_REG_RTN_DATANUM);
9197         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9198         if (!desc)
9199                 return -ENOMEM;
9200
9201         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9202         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9203         if (ret) {
9204                 dev_err(&hdev->pdev->dev,
9205                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9206                 kfree(desc);
9207                 return ret;
9208         }
9209
9210         for (i = 0; i < cmd_num; i++) {
9211                 if (i == 0) {
9212                         desc_data = (__le64 *)(&desc[i].data[0]);
9213                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9214                 } else {
9215                         desc_data = (__le64 *)(&desc[i]);
9216                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9217                 }
9218                 for (k = 0; k < n; k++) {
9219                         *reg_val++ = le64_to_cpu(*desc_data++);
9220
9221                         regs_num--;
9222                         if (!regs_num)
9223                                 break;
9224                 }
9225         }
9226
9227         kfree(desc);
9228         return 0;
9229 }
9230
9231 #define MAX_SEPARATE_NUM        4
9232 #define SEPARATOR_VALUE         0xFFFFFFFF
9233 #define REG_NUM_PER_LINE        4
9234 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9235
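/* Size, in bytes, of the dump produced by hclge_get_regs(): one
 * separator-padded block each for the cmdq and common registers, one
 * block of ring registers per TQP, one block of TQP interrupt registers
 * per in-use vector (num_msi_used - 1, presumably excluding the misc
 * vector), plus the 32-bit and 64-bit register sets reported by
 * firmware.
 */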
9236 static int hclge_get_regs_len(struct hnae3_handle *handle)
9237 {
9238         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9239         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9240         struct hclge_vport *vport = hclge_get_vport(handle);
9241         struct hclge_dev *hdev = vport->back;
9242         u32 regs_num_32_bit, regs_num_64_bit;
9243         int ret;
9244
9245         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9246         if (ret) {
9247                 dev_err(&hdev->pdev->dev,
9248                         "Get register number failed, ret = %d.\n", ret);
9249                 return -EOPNOTSUPP;
9250         }
9251
9252         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9253         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9254         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9255         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9256
9257         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9258                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9259                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9260 }
9261
9262 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9263                            void *data)
9264 {
9265         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9266         struct hclge_vport *vport = hclge_get_vport(handle);
9267         struct hclge_dev *hdev = vport->back;
9268         u32 regs_num_32_bit, regs_num_64_bit;
9269         int i, j, reg_um, separator_num;
9270         u32 *reg = data;
9271         int ret;
9272
9273         *version = hdev->fw_version;
9274
9275         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9276         if (ret) {
9277                 dev_err(&hdev->pdev->dev,
9278                         "Get register number failed, ret = %d.\n", ret);
9279                 return;
9280         }
9281
9282         /* fetching per-PF register values from PF PCIe register space */
9283         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9284         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9285         for (i = 0; i < reg_um; i++)
9286                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9287         for (i = 0; i < separator_num; i++)
9288                 *reg++ = SEPARATOR_VALUE;
9289
9290         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9291         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9292         for (i = 0; i < reg_um; i++)
9293                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9294         for (i = 0; i < separator_num; i++)
9295                 *reg++ = SEPARATOR_VALUE;
9296
9297         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9298         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9299         for (j = 0; j < kinfo->num_tqps; j++) {
9300                 for (i = 0; i < reg_um; i++)
9301                         *reg++ = hclge_read_dev(&hdev->hw,
9302                                                 ring_reg_addr_list[i] +
9303                                                 0x200 * j);
9304                 for (i = 0; i < separator_num; i++)
9305                         *reg++ = SEPARATOR_VALUE;
9306         }
9307
9308         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9309         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9310         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9311                 for (i = 0; i < reg_um; i++)
9312                         *reg++ = hclge_read_dev(&hdev->hw,
9313                                                 tqp_intr_reg_addr_list[i] +
9314                                                 4 * j);
9315                 for (i = 0; i < separator_num; i++)
9316                         *reg++ = SEPARATOR_VALUE;
9317         }
9318
9319         /* fetching PF common register values from firmware */
9320         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9321         if (ret) {
9322                 dev_err(&hdev->pdev->dev,
9323                         "Get 32 bit register failed, ret = %d.\n", ret);
9324                 return;
9325         }
9326
9327         reg += regs_num_32_bit;
9328         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9329         if (ret)
9330                 dev_err(&hdev->pdev->dev,
9331                         "Get 64 bit register failed, ret = %d.\n", ret);
9332 }
9333
9334 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9335 {
9336         struct hclge_set_led_state_cmd *req;
9337         struct hclge_desc desc;
9338         int ret;
9339
9340         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9341
9342         req = (struct hclge_set_led_state_cmd *)desc.data;
9343         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9344                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9345
9346         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9347         if (ret)
9348                 dev_err(&hdev->pdev->dev,
9349                         "Send set led state cmd error, ret =%d\n", ret);
9350
9351         return ret;
9352 }
9353
9354 enum hclge_led_status {
9355         HCLGE_LED_OFF,
9356         HCLGE_LED_ON,
9357         HCLGE_LED_NO_CHANGE = 0xFF,
9358 };
9359
9360 static int hclge_set_led_id(struct hnae3_handle *handle,
9361                             enum ethtool_phys_id_state status)
9362 {
9363         struct hclge_vport *vport = hclge_get_vport(handle);
9364         struct hclge_dev *hdev = vport->back;
9365
9366         switch (status) {
9367         case ETHTOOL_ID_ACTIVE:
9368                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9369         case ETHTOOL_ID_INACTIVE:
9370                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9371         default:
9372                 return -EINVAL;
9373         }
9374 }
9375
9376 static void hclge_get_link_mode(struct hnae3_handle *handle,
9377                                 unsigned long *supported,
9378                                 unsigned long *advertising)
9379 {
9380         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9381         struct hclge_vport *vport = hclge_get_vport(handle);
9382         struct hclge_dev *hdev = vport->back;
9383         unsigned int idx = 0;
9384
9385         for (; idx < size; idx++) {
9386                 supported[idx] = hdev->hw.mac.supported[idx];
9387                 advertising[idx] = hdev->hw.mac.advertising[idx];
9388         }
9389 }
9390
9391 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9392 {
9393         struct hclge_vport *vport = hclge_get_vport(handle);
9394         struct hclge_dev *hdev = vport->back;
9395
9396         return hclge_config_gro(hdev, enable);
9397 }
9398
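/* PF operations exposed to the hnae3 framework; registered via ae_algo
 * below in hclge_init().
 */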
9399 static const struct hnae3_ae_ops hclge_ops = {
9400         .init_ae_dev = hclge_init_ae_dev,
9401         .uninit_ae_dev = hclge_uninit_ae_dev,
9402         .flr_prepare = hclge_flr_prepare,
9403         .flr_done = hclge_flr_done,
9404         .init_client_instance = hclge_init_client_instance,
9405         .uninit_client_instance = hclge_uninit_client_instance,
9406         .map_ring_to_vector = hclge_map_ring_to_vector,
9407         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9408         .get_vector = hclge_get_vector,
9409         .put_vector = hclge_put_vector,
9410         .set_promisc_mode = hclge_set_promisc_mode,
9411         .set_loopback = hclge_set_loopback,
9412         .start = hclge_ae_start,
9413         .stop = hclge_ae_stop,
9414         .client_start = hclge_client_start,
9415         .client_stop = hclge_client_stop,
9416         .get_status = hclge_get_status,
9417         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9418         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9419         .get_media_type = hclge_get_media_type,
9420         .check_port_speed = hclge_check_port_speed,
9421         .get_fec = hclge_get_fec,
9422         .set_fec = hclge_set_fec,
9423         .get_rss_key_size = hclge_get_rss_key_size,
9424         .get_rss_indir_size = hclge_get_rss_indir_size,
9425         .get_rss = hclge_get_rss,
9426         .set_rss = hclge_set_rss,
9427         .set_rss_tuple = hclge_set_rss_tuple,
9428         .get_rss_tuple = hclge_get_rss_tuple,
9429         .get_tc_size = hclge_get_tc_size,
9430         .get_mac_addr = hclge_get_mac_addr,
9431         .set_mac_addr = hclge_set_mac_addr,
9432         .do_ioctl = hclge_do_ioctl,
9433         .add_uc_addr = hclge_add_uc_addr,
9434         .rm_uc_addr = hclge_rm_uc_addr,
9435         .add_mc_addr = hclge_add_mc_addr,
9436         .rm_mc_addr = hclge_rm_mc_addr,
9437         .set_autoneg = hclge_set_autoneg,
9438         .get_autoneg = hclge_get_autoneg,
9439         .restart_autoneg = hclge_restart_autoneg,
9440         .halt_autoneg = hclge_halt_autoneg,
9441         .get_pauseparam = hclge_get_pauseparam,
9442         .set_pauseparam = hclge_set_pauseparam,
9443         .set_mtu = hclge_set_mtu,
9444         .reset_queue = hclge_reset_tqp,
9445         .get_stats = hclge_get_stats,
9446         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9447         .update_stats = hclge_update_stats,
9448         .get_strings = hclge_get_strings,
9449         .get_sset_count = hclge_get_sset_count,
9450         .get_fw_version = hclge_get_fw_version,
9451         .get_mdix_mode = hclge_get_mdix_mode,
9452         .enable_vlan_filter = hclge_enable_vlan_filter,
9453         .set_vlan_filter = hclge_set_vlan_filter,
9454         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9455         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9456         .reset_event = hclge_reset_event,
9457         .get_reset_level = hclge_get_reset_level,
9458         .set_default_reset_request = hclge_set_def_reset_request,
9459         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9460         .set_channels = hclge_set_channels,
9461         .get_channels = hclge_get_channels,
9462         .get_regs_len = hclge_get_regs_len,
9463         .get_regs = hclge_get_regs,
9464         .set_led_id = hclge_set_led_id,
9465         .get_link_mode = hclge_get_link_mode,
9466         .add_fd_entry = hclge_add_fd_entry,
9467         .del_fd_entry = hclge_del_fd_entry,
9468         .del_all_fd_entries = hclge_del_all_fd_entries,
9469         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9470         .get_fd_rule_info = hclge_get_fd_rule_info,
9471         .get_fd_all_rules = hclge_get_all_rules,
9472         .restore_fd_rules = hclge_restore_fd_entries,
9473         .enable_fd = hclge_enable_fd,
9474         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9475         .dbg_run_cmd = hclge_dbg_run_cmd,
9476         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9477         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9478         .ae_dev_resetting = hclge_ae_dev_resetting,
9479         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9480         .set_gro_en = hclge_gro_en,
9481         .get_global_queue_id = hclge_covert_handle_qid_global,
9482         .set_timer_task = hclge_set_timer_task,
9483         .mac_connect_phy = hclge_mac_connect_phy,
9484         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9485         .restore_vlan_table = hclge_restore_vlan_table,
9486 };
9487
9488 static struct hnae3_ae_algo ae_algo = {
9489         .ops = &hclge_ops,
9490         .pdev_id_table = ae_algo_pci_tbl,
9491 };
9492
9493 static int hclge_init(void)
9494 {
9495         pr_info("%s is initializing\n", HCLGE_NAME);
9496
9497         hnae3_register_ae_algo(&ae_algo);
9498
9499         return 0;
9500 }
9501
9502 static void hclge_exit(void)
9503 {
9504         hnae3_unregister_ae_algo(&ae_algo);
9505 }
9506 module_init(hclge_init);
9507 module_exit(hclge_exit);
9508
9509 MODULE_LICENSE("GPL");
9510 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9511 MODULE_DESCRIPTION("HCLGE Driver");
9512 MODULE_VERSION(HCLGE_MOD_VERSION);