net: hns3: some modifications to simplify and optimize code
[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33
34 #define HCLGE_RESET_MAX_FAIL_CNT        5
35
36 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
40 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
41                                u16 *allocated_size, bool is_alloc);
42 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
43 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
44
45 static struct hnae3_ae_algo ae_algo;
46
47 static const struct pci_device_id ae_algo_pci_tbl[] = {
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
51         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
53         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
55         /* required last entry */
56         {0, }
57 };
58
59 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
60
61 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
62                                          HCLGE_CMDQ_TX_ADDR_H_REG,
63                                          HCLGE_CMDQ_TX_DEPTH_REG,
64                                          HCLGE_CMDQ_TX_TAIL_REG,
65                                          HCLGE_CMDQ_TX_HEAD_REG,
66                                          HCLGE_CMDQ_RX_ADDR_L_REG,
67                                          HCLGE_CMDQ_RX_ADDR_H_REG,
68                                          HCLGE_CMDQ_RX_DEPTH_REG,
69                                          HCLGE_CMDQ_RX_TAIL_REG,
70                                          HCLGE_CMDQ_RX_HEAD_REG,
71                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
72                                          HCLGE_CMDQ_INTR_STS_REG,
73                                          HCLGE_CMDQ_INTR_EN_REG,
74                                          HCLGE_CMDQ_INTR_GEN_REG};
75
76 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
77                                            HCLGE_VECTOR0_OTER_EN_REG,
78                                            HCLGE_MISC_RESET_STS_REG,
79                                            HCLGE_MISC_VECTOR_INT_STS,
80                                            HCLGE_GLOBAL_RESET_REG,
81                                            HCLGE_FUN_RST_ING,
82                                            HCLGE_GRO_EN_REG};
83
84 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
85                                          HCLGE_RING_RX_ADDR_H_REG,
86                                          HCLGE_RING_RX_BD_NUM_REG,
87                                          HCLGE_RING_RX_BD_LENGTH_REG,
88                                          HCLGE_RING_RX_MERGE_EN_REG,
89                                          HCLGE_RING_RX_TAIL_REG,
90                                          HCLGE_RING_RX_HEAD_REG,
91                                          HCLGE_RING_RX_FBD_NUM_REG,
92                                          HCLGE_RING_RX_OFFSET_REG,
93                                          HCLGE_RING_RX_FBD_OFFSET_REG,
94                                          HCLGE_RING_RX_STASH_REG,
95                                          HCLGE_RING_RX_BD_ERR_REG,
96                                          HCLGE_RING_TX_ADDR_L_REG,
97                                          HCLGE_RING_TX_ADDR_H_REG,
98                                          HCLGE_RING_TX_BD_NUM_REG,
99                                          HCLGE_RING_TX_PRIORITY_REG,
100                                          HCLGE_RING_TX_TC_REG,
101                                          HCLGE_RING_TX_MERGE_EN_REG,
102                                          HCLGE_RING_TX_TAIL_REG,
103                                          HCLGE_RING_TX_HEAD_REG,
104                                          HCLGE_RING_TX_FBD_NUM_REG,
105                                          HCLGE_RING_TX_OFFSET_REG,
106                                          HCLGE_RING_TX_EBD_NUM_REG,
107                                          HCLGE_RING_TX_EBD_OFFSET_REG,
108                                          HCLGE_RING_TX_BD_ERR_REG,
109                                          HCLGE_RING_EN_REG};
110
111 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
112                                              HCLGE_TQP_INTR_GL0_REG,
113                                              HCLGE_TQP_INTR_GL1_REG,
114                                              HCLGE_TQP_INTR_GL2_REG,
115                                              HCLGE_TQP_INTR_RL_REG};
116
117 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
118         "App    Loopback test",
119         "Serdes serial Loopback test",
120         "Serdes parallel Loopback test",
121         "Phy    Loopback test"
122 };
123
124 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
125         {"mac_tx_mac_pause_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
127         {"mac_rx_mac_pause_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
129         {"mac_tx_control_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
131         {"mac_rx_control_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
133         {"mac_tx_pfc_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
135         {"mac_tx_pfc_pri0_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
137         {"mac_tx_pfc_pri1_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
139         {"mac_tx_pfc_pri2_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
141         {"mac_tx_pfc_pri3_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
143         {"mac_tx_pfc_pri4_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
145         {"mac_tx_pfc_pri5_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
147         {"mac_tx_pfc_pri6_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
149         {"mac_tx_pfc_pri7_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
151         {"mac_rx_pfc_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
153         {"mac_rx_pfc_pri0_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
155         {"mac_rx_pfc_pri1_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
157         {"mac_rx_pfc_pri2_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
159         {"mac_rx_pfc_pri3_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
161         {"mac_rx_pfc_pri4_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
163         {"mac_rx_pfc_pri5_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
165         {"mac_rx_pfc_pri6_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
167         {"mac_rx_pfc_pri7_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
169         {"mac_tx_total_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
171         {"mac_tx_total_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
173         {"mac_tx_good_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
175         {"mac_tx_bad_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
177         {"mac_tx_good_oct_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
179         {"mac_tx_bad_oct_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
181         {"mac_tx_uni_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
183         {"mac_tx_multi_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
185         {"mac_tx_broad_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
187         {"mac_tx_undersize_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
189         {"mac_tx_oversize_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
191         {"mac_tx_64_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
193         {"mac_tx_65_127_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
195         {"mac_tx_128_255_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
197         {"mac_tx_256_511_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
199         {"mac_tx_512_1023_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
201         {"mac_tx_1024_1518_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
203         {"mac_tx_1519_2047_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
205         {"mac_tx_2048_4095_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
207         {"mac_tx_4096_8191_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
209         {"mac_tx_8192_9216_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
211         {"mac_tx_9217_12287_oct_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
213         {"mac_tx_12288_16383_oct_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
215         {"mac_tx_1519_max_good_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
217         {"mac_tx_1519_max_bad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
219         {"mac_rx_total_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
221         {"mac_rx_total_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
223         {"mac_rx_good_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
225         {"mac_rx_bad_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
227         {"mac_rx_good_oct_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
229         {"mac_rx_bad_oct_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
231         {"mac_rx_uni_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
233         {"mac_rx_multi_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
235         {"mac_rx_broad_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
237         {"mac_rx_undersize_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
239         {"mac_rx_oversize_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
241         {"mac_rx_64_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
243         {"mac_rx_65_127_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
245         {"mac_rx_128_255_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
247         {"mac_rx_256_511_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
249         {"mac_rx_512_1023_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
251         {"mac_rx_1024_1518_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
253         {"mac_rx_1519_2047_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
255         {"mac_rx_2048_4095_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
257         {"mac_rx_4096_8191_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
259         {"mac_rx_8192_9216_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
261         {"mac_rx_9217_12287_oct_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
263         {"mac_rx_12288_16383_oct_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
265         {"mac_rx_1519_max_good_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
267         {"mac_rx_1519_max_bad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
269
270         {"mac_tx_fragment_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
272         {"mac_tx_undermin_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
274         {"mac_tx_jabber_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
276         {"mac_tx_err_all_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
278         {"mac_tx_from_app_good_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
280         {"mac_tx_from_app_bad_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
282         {"mac_rx_fragment_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
284         {"mac_rx_undermin_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
286         {"mac_rx_jabber_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
288         {"mac_rx_fcs_err_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
290         {"mac_rx_send_app_good_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
292         {"mac_rx_send_app_bad_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
294 };
295
296 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
297         {
298                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
299                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
300                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
301                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
302                 .i_port_bitmap = 0x1,
303         },
304 };
305
306 static const u8 hclge_hash_key[] = {
307         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
308         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
309         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
310         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
311         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
312 };
313
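/* Legacy MAC statistics read: send a fixed batch of HCLGE_MAC_CMD_NUM
 * descriptors and accumulate the returned counters into
 * hdev->hw_stats.mac_stats in field order. Only the first descriptor
 * carries the command head, so it holds fewer stat words than the rest.
 */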
314 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
315 {
316 #define HCLGE_MAC_CMD_NUM 21
317
318         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
319         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
320         __le64 *desc_data;
321         int i, k, n;
322         int ret;
323
324         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
325         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
326         if (ret) {
327                 dev_err(&hdev->pdev->dev,
328                         "Get MAC pkt stats fail, status = %d.\n", ret);
329
330                 return ret;
331         }
332
333         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
334                 /* for special opcode 0032, only the first desc has the head */
335                 if (unlikely(i == 0)) {
336                         desc_data = (__le64 *)(&desc[i].data[0]);
337                         n = HCLGE_RD_FIRST_STATS_NUM;
338                 } else {
339                         desc_data = (__le64 *)(&desc[i]);
340                         n = HCLGE_RD_OTHER_STATS_NUM;
341                 }
342
343                 for (k = 0; k < n; k++) {
344                         *data += le64_to_cpu(*desc_data);
345                         data++;
346                         desc_data++;
347                 }
348         }
349
350         return 0;
351 }
352
353 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
354 {
355         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
356         struct hclge_desc *desc;
357         __le64 *desc_data;
358         u16 i, k, n;
359         int ret;
360
361         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
362         if (!desc)
363                 return -ENOMEM;
364         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
365         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
366         if (ret) {
367                 kfree(desc);
368                 return ret;
369         }
370
371         for (i = 0; i < desc_num; i++) {
372                 /* for special opcode 0034, only the first desc has the head */
373                 if (i == 0) {
374                         desc_data = (__le64 *)(&desc[i].data[0]);
375                         n = HCLGE_RD_FIRST_STATS_NUM;
376                 } else {
377                         desc_data = (__le64 *)(&desc[i]);
378                         n = HCLGE_RD_OTHER_STATS_NUM;
379                 }
380
381                 for (k = 0; k < n; k++) {
382                         *data += le64_to_cpu(*desc_data);
383                         data++;
384                         desc_data++;
385                 }
386         }
387
388         kfree(desc);
389
390         return 0;
391 }
392
393 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
394 {
395         struct hclge_desc desc;
396         __le32 *desc_data;
397         u32 reg_num;
398         int ret;
399
400         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
401         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
402         if (ret)
403                 return ret;
404
405         desc_data = (__le32 *)(&desc.data[0]);
406         reg_num = le32_to_cpu(*desc_data);
407
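        /* Convert the register count into a descriptor count: this is
         * effectively 1 + DIV_ROUND_UP(reg_num - 3, 4), i.e. one head
         * descriptor plus enough extra descriptors for the remaining stats.
         */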
408         *desc_num = 1 + ((reg_num - 3) >> 2) +
409                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
410
411         return 0;
412 }
413
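/* Prefer the newer variable-length MAC stats command: query how many
 * descriptors it needs first; if the firmware returns -EOPNOTSUPP, fall
 * back to the legacy fixed-length read above.
 */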
414 static int hclge_mac_update_stats(struct hclge_dev *hdev)
415 {
416         u32 desc_num;
417         int ret;
418
419         ret = hclge_mac_query_reg_num(hdev, &desc_num);
420
421         /* The firmware supports the new statistics acquisition method */
422         if (!ret)
423                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
424         else if (ret == -EOPNOTSUPP)
425                 ret = hclge_mac_update_stats_defective(hdev);
426         else
427                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
428
429         return ret;
430 }
431
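/* Read the per-queue RX and TX packet counters, one command per queue,
 * and accumulate them into each tqp's software stats.
 */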
432 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
433 {
434         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
435         struct hclge_vport *vport = hclge_get_vport(handle);
436         struct hclge_dev *hdev = vport->back;
437         struct hnae3_queue *queue;
438         struct hclge_desc desc[1];
439         struct hclge_tqp *tqp;
440         int ret, i;
441
442         for (i = 0; i < kinfo->num_tqps; i++) {
443                 queue = handle->kinfo.tqp[i];
444                 tqp = container_of(queue, struct hclge_tqp, q);
445                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
446                 hclge_cmd_setup_basic_desc(&desc[0],
447                                            HCLGE_OPC_QUERY_RX_STATUS,
448                                            true);
449
450                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
451                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
452                 if (ret) {
453                         dev_err(&hdev->pdev->dev,
454                                 "Query tqp stat fail, status = %d, queue = %d\n",
455                                 ret, i);
456                         return ret;
457                 }
458                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
459                         le32_to_cpu(desc[0].data[1]);
460         }
461
462         for (i = 0; i < kinfo->num_tqps; i++) {
463                 queue = handle->kinfo.tqp[i];
464                 tqp = container_of(queue, struct hclge_tqp, q);
465                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
466                 hclge_cmd_setup_basic_desc(&desc[0],
467                                            HCLGE_OPC_QUERY_TX_STATUS,
468                                            true);
469
470                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
471                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
472                 if (ret) {
473                         dev_err(&hdev->pdev->dev,
474                                 "Query tqp stat fail, status = %d, queue = %d\n",
475                                 ret, i);
476                         return ret;
477                 }
478                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
479                         le32_to_cpu(desc[0].data[1]);
480         }
481
482         return 0;
483 }
484
485 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
486 {
487         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
488         struct hclge_tqp *tqp;
489         u64 *buff = data;
490         int i;
491
492         for (i = 0; i < kinfo->num_tqps; i++) {
493                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
494                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
495         }
496
497         for (i = 0; i < kinfo->num_tqps; i++) {
498                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
499                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
500         }
501
502         return buff;
503 }
504
505 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
506 {
507         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
508
509         return kinfo->num_tqps * 2;
510 }
511
512 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
513 {
514         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
515         u8 *buff = data;
516         int i = 0;
517
518         for (i = 0; i < kinfo->num_tqps; i++) {
519                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
520                         struct hclge_tqp, q);
521                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
522                          tqp->index);
523                 buff = buff + ETH_GSTRING_LEN;
524         }
525
526         for (i = 0; i < kinfo->num_tqps; i++) {
527                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
528                         struct hclge_tqp, q);
529                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
530                          tqp->index);
531                 buff = buff + ETH_GSTRING_LEN;
532         }
533
534         return buff;
535 }
536
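/* hclge_comm_get_stats()/hclge_comm_get_strings() walk a
 * hclge_comm_stats_str table (e.g. g_mac_stats_string), copying each
 * counter by its byte offset and each name into the caller's buffer.
 */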
537 static u64 *hclge_comm_get_stats(void *comm_stats,
538                                  const struct hclge_comm_stats_str strs[],
539                                  int size, u64 *data)
540 {
541         u64 *buf = data;
542         u32 i;
543
544         for (i = 0; i < size; i++)
545                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
546
547         return buf + size;
548 }
549
550 static u8 *hclge_comm_get_strings(u32 stringset,
551                                   const struct hclge_comm_stats_str strs[],
552                                   int size, u8 *data)
553 {
554         char *buff = (char *)data;
555         u32 i;
556
557         if (stringset != ETH_SS_STATS)
558                 return buff;
559
560         for (i = 0; i < size; i++) {
561                 snprintf(buff, ETH_GSTRING_LEN, "%s",
562                          strs[i].desc);
563                 buff = buff + ETH_GSTRING_LEN;
564         }
565
566         return (u8 *)buff;
567 }
568
569 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
570 {
571         struct hnae3_handle *handle;
572         int status;
573
574         handle = &hdev->vport[0].nic;
575         if (handle->client) {
576                 status = hclge_tqps_update_stats(handle);
577                 if (status) {
578                         dev_err(&hdev->pdev->dev,
579                                 "Update TQPS stats fail, status = %d.\n",
580                                 status);
581                 }
582         }
583
584         status = hclge_mac_update_stats(hdev);
585         if (status)
586                 dev_err(&hdev->pdev->dev,
587                         "Update MAC stats fail, status = %d.\n", status);
588 }
589
590 static void hclge_update_stats(struct hnae3_handle *handle,
591                                struct net_device_stats *net_stats)
592 {
593         struct hclge_vport *vport = hclge_get_vport(handle);
594         struct hclge_dev *hdev = vport->back;
595         int status;
596
597         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
598                 return;
599
600         status = hclge_mac_update_stats(hdev);
601         if (status)
602                 dev_err(&hdev->pdev->dev,
603                         "Update MAC stats fail, status = %d.\n",
604                         status);
605
606         status = hclge_tqps_update_stats(handle);
607         if (status)
608                 dev_err(&hdev->pdev->dev,
609                         "Update TQPS stats fail, status = %d.\n",
610                         status);
611
612         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
613 }
614
615 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
616 {
617 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
618                 HNAE3_SUPPORT_PHY_LOOPBACK |\
619                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
620                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
621
622         struct hclge_vport *vport = hclge_get_vport(handle);
623         struct hclge_dev *hdev = vport->back;
624         int count = 0;
625
626         /* Loopback test support rules:
627          * mac: only GE mode is supported
628          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
629          * phy: only supported when a phy device exists on the board
630          */
631         if (stringset == ETH_SS_TEST) {
632                 /* clear loopback bit flags at first */
633                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
634                 if (hdev->pdev->revision >= 0x21 ||
635                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
636                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
637                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
638                         count += 1;
639                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
640                 }
641
642                 count += 2;
643                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
644                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
645         } else if (stringset == ETH_SS_STATS) {
646                 count = ARRAY_SIZE(g_mac_stats_string) +
647                         hclge_tqps_get_sset_count(handle, stringset);
648         }
649
650         return count;
651 }
652
653 static void hclge_get_strings(struct hnae3_handle *handle,
654                               u32 stringset,
655                               u8 *data)
656 {
657         u8 *p = data;
658         int size;
659
660         if (stringset == ETH_SS_STATS) {
661                 size = ARRAY_SIZE(g_mac_stats_string);
662                 p = hclge_comm_get_strings(stringset,
663                                            g_mac_stats_string,
664                                            size,
665                                            p);
666                 p = hclge_tqps_get_strings(handle, p);
667         } else if (stringset == ETH_SS_TEST) {
668                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
669                         memcpy(p,
670                                hns3_nic_test_strs[HNAE3_LOOP_APP],
671                                ETH_GSTRING_LEN);
672                         p += ETH_GSTRING_LEN;
673                 }
674                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
675                         memcpy(p,
676                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
677                                ETH_GSTRING_LEN);
678                         p += ETH_GSTRING_LEN;
679                 }
680                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
681                         memcpy(p,
682                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
687                         memcpy(p,
688                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
689                                ETH_GSTRING_LEN);
690                         p += ETH_GSTRING_LEN;
691                 }
692         }
693 }
694
695 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
696 {
697         struct hclge_vport *vport = hclge_get_vport(handle);
698         struct hclge_dev *hdev = vport->back;
699         u64 *p;
700
701         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
702                                  g_mac_stats_string,
703                                  ARRAY_SIZE(g_mac_stats_string),
704                                  data);
705         p = hclge_tqps_get_stats(handle, p);
706 }
707
708 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
709                                      u64 *rx_cnt)
710 {
711         struct hclge_vport *vport = hclge_get_vport(handle);
712         struct hclge_dev *hdev = vport->back;
713
714         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
715         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
716 }
717
718 static int hclge_parse_func_status(struct hclge_dev *hdev,
719                                    struct hclge_func_status_cmd *status)
720 {
721         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
722                 return -EINVAL;
723
724         /* Set the pf to main pf */
725         if (status->pf_state & HCLGE_PF_STATE_MAIN)
726                 hdev->flag |= HCLGE_FLAG_MAIN;
727         else
728                 hdev->flag &= ~HCLGE_FLAG_MAIN;
729
730         return 0;
731 }
732
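/* Poll the function status command until the firmware reports a non-zero
 * pf_state (PF reset done), retrying up to HCLGE_QUERY_MAX_CNT times with
 * a short sleep in between, then record whether this PF is the main PF.
 */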
733 static int hclge_query_function_status(struct hclge_dev *hdev)
734 {
735 #define HCLGE_QUERY_MAX_CNT     5
736
737         struct hclge_func_status_cmd *req;
738         struct hclge_desc desc;
739         int timeout = 0;
740         int ret;
741
742         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
743         req = (struct hclge_func_status_cmd *)desc.data;
744
745         do {
746                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
747                 if (ret) {
748                         dev_err(&hdev->pdev->dev,
749                                 "query function status failed %d.\n",
750                                 ret);
751
752                         return ret;
753                 }
754
755                 /* Check pf reset is done */
756                 if (req->pf_state)
757                         break;
758                 usleep_range(1000, 2000);
759         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
760
761         ret = hclge_parse_func_status(hdev, req);
762
763         return ret;
764 }
765
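/* Query the PF resource descriptor: number of TQPs, the packet/TX/DV
 * buffer sizes (converted to bytes via the HCLGE_BUF_UNIT_S shift), and
 * the MSI-X vector budget, which also covers the RoCE vectors when RoCE
 * is supported.
 */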
766 static int hclge_query_pf_resource(struct hclge_dev *hdev)
767 {
768         struct hclge_pf_res_cmd *req;
769         struct hclge_desc desc;
770         int ret;
771
772         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
773         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
774         if (ret) {
775                 dev_err(&hdev->pdev->dev,
776                         "query pf resource failed %d.\n", ret);
777                 return ret;
778         }
779
780         req = (struct hclge_pf_res_cmd *)desc.data;
781         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
782         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
783
784         if (req->tx_buf_size)
785                 hdev->tx_buf_size =
786                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
787         else
788                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
789
790         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
791
792         if (req->dv_buf_size)
793                 hdev->dv_buf_size =
794                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
795         else
796                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
797
798         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
799
800         if (hnae3_dev_roce_supported(hdev)) {
801                 hdev->roce_base_msix_offset =
802                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
803                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
804                 hdev->num_roce_msi =
805                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
806                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
807
808                 /* PF should have NIC vectors and RoCE vectors,
809                  * NIC vectors are queued before RoCE vectors.
810                  */
811                 hdev->num_msi = hdev->num_roce_msi  +
812                                 hdev->roce_base_msix_offset;
813         } else {
814                 hdev->num_msi =
815                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
816                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
817         }
818
819         return 0;
820 }
821
822 static int hclge_parse_speed(int speed_cmd, int *speed)
823 {
824         switch (speed_cmd) {
825         case 6:
826                 *speed = HCLGE_MAC_SPEED_10M;
827                 break;
828         case 7:
829                 *speed = HCLGE_MAC_SPEED_100M;
830                 break;
831         case 0:
832                 *speed = HCLGE_MAC_SPEED_1G;
833                 break;
834         case 1:
835                 *speed = HCLGE_MAC_SPEED_10G;
836                 break;
837         case 2:
838                 *speed = HCLGE_MAC_SPEED_25G;
839                 break;
840         case 3:
841                 *speed = HCLGE_MAC_SPEED_40G;
842                 break;
843         case 4:
844                 *speed = HCLGE_MAC_SPEED_50G;
845                 break;
846         case 5:
847                 *speed = HCLGE_MAC_SPEED_100G;
848                 break;
849         default:
850                 return -EINVAL;
851         }
852
853         return 0;
854 }
855
856 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
857 {
858         struct hclge_vport *vport = hclge_get_vport(handle);
859         struct hclge_dev *hdev = vport->back;
860         u32 speed_ability = hdev->hw.mac.speed_ability;
861         u32 speed_bit = 0;
862
863         switch (speed) {
864         case HCLGE_MAC_SPEED_10M:
865                 speed_bit = HCLGE_SUPPORT_10M_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_100M:
868                 speed_bit = HCLGE_SUPPORT_100M_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_1G:
871                 speed_bit = HCLGE_SUPPORT_1G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_10G:
874                 speed_bit = HCLGE_SUPPORT_10G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_25G:
877                 speed_bit = HCLGE_SUPPORT_25G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_40G:
880                 speed_bit = HCLGE_SUPPORT_40G_BIT;
881                 break;
882         case HCLGE_MAC_SPEED_50G:
883                 speed_bit = HCLGE_SUPPORT_50G_BIT;
884                 break;
885         case HCLGE_MAC_SPEED_100G:
886                 speed_bit = HCLGE_SUPPORT_100G_BIT;
887                 break;
888         default:
889                 return -EINVAL;
890         }
891
892         if (speed_bit & speed_ability)
893                 return 0;
894
895         return -EINVAL;
896 }
897
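/* The hclge_convert_setting_{sr,lr,cr,kr}() helpers below translate the
 * firmware speed_ability bitmap into the matching ethtool link modes for
 * SR/LR/CR fibre and KR backplane media.
 */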
898 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
899 {
900         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
908                                  mac->supported);
909         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
910                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
911                                  mac->supported);
912         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
913                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
914                                  mac->supported);
915 }
916
917 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
918 {
919         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
927                                  mac->supported);
928         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
929                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
930                                  mac->supported);
931         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
932                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
933                                  mac->supported);
934 }
935
936 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
937 {
938         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
946                                  mac->supported);
947         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
948                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
949                                  mac->supported);
950         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
951                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
952                                  mac->supported);
953 }
954
955 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
956 {
957         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
968                                  mac->supported);
969         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
970                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
971                                  mac->supported);
972         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
973                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
974                                  mac->supported);
975 }
976
977 static void hclge_convert_setting_fec(struct hclge_mac *mac)
978 {
979         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
980         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
981
982         switch (mac->speed) {
983         case HCLGE_MAC_SPEED_10G:
984         case HCLGE_MAC_SPEED_40G:
985                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
986                                  mac->supported);
987                 mac->fec_ability =
988                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991         case HCLGE_MAC_SPEED_50G:
992                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
993                                  mac->supported);
994                 mac->fec_ability =
995                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
996                         BIT(HNAE3_FEC_AUTO);
997                 break;
998         case HCLGE_MAC_SPEED_100G:
999                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1000                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1001                 break;
1002         default:
1003                 mac->fec_ability = 0;
1004                 break;
1005         }
1006 }
1007
1008 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1009                                         u8 speed_ability)
1010 {
1011         struct hclge_mac *mac = &hdev->hw.mac;
1012
1013         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1014                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1015                                  mac->supported);
1016
1017         hclge_convert_setting_sr(mac, speed_ability);
1018         hclge_convert_setting_lr(mac, speed_ability);
1019         hclge_convert_setting_cr(mac, speed_ability);
1020         if (hdev->pdev->revision >= 0x21)
1021                 hclge_convert_setting_fec(mac);
1022
1023         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1024         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1025         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1026 }
1027
1028 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1029                                             u8 speed_ability)
1030 {
1031         struct hclge_mac *mac = &hdev->hw.mac;
1032
1033         hclge_convert_setting_kr(mac, speed_ability);
1034         if (hdev->pdev->revision >= 0x21)
1035                 hclge_convert_setting_fec(mac);
1036         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1037         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1038         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1039 }
1040
1041 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1042                                          u8 speed_ability)
1043 {
1044         unsigned long *supported = hdev->hw.mac.supported;
1045
1046         /* default to supporting all speeds for a GE port */
1047         if (!speed_ability)
1048                 speed_ability = HCLGE_SUPPORT_GE;
1049
1050         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1052                                  supported);
1053
1054         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1056                                  supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1058                                  supported);
1059         }
1060
1061         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1063                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1064         }
1065
1066         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1067         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1068         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1069 }
1070
1071 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1072 {
1073         u8 media_type = hdev->hw.mac.media_type;
1074
1075         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1076                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1077         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1078                 hclge_parse_copper_link_mode(hdev, speed_ability);
1079         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1080                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1081 }
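
/* Decode the two GET_CFG_PARAM descriptors into struct hclge_cfg: vmdq
 * vport number, TC number, TQP descriptor number, PHY address, media
 * type, RX buffer length, MAC address (split across param[2]/param[3]),
 * default speed, RSS size, NUMA node map, speed ability and UMV table
 * space (with a per-PF default when the firmware reports zero).
 */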
1082 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1083 {
1084         struct hclge_cfg_param_cmd *req;
1085         u64 mac_addr_tmp_high;
1086         u64 mac_addr_tmp;
1087         int i;
1088
1089         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1090
1091         /* get the configuration */
1092         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1093                                               HCLGE_CFG_VMDQ_M,
1094                                               HCLGE_CFG_VMDQ_S);
1095         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1096                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1097         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1098                                             HCLGE_CFG_TQP_DESC_N_M,
1099                                             HCLGE_CFG_TQP_DESC_N_S);
1100
1101         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                         HCLGE_CFG_PHY_ADDR_M,
1103                                         HCLGE_CFG_PHY_ADDR_S);
1104         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1105                                           HCLGE_CFG_MEDIA_TP_M,
1106                                           HCLGE_CFG_MEDIA_TP_S);
1107         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1108                                           HCLGE_CFG_RX_BUF_LEN_M,
1109                                           HCLGE_CFG_RX_BUF_LEN_S);
1110         /* get mac_address */
1111         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1112         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                             HCLGE_CFG_MAC_ADDR_H_M,
1114                                             HCLGE_CFG_MAC_ADDR_H_S);
1115
1116         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1117
1118         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1119                                              HCLGE_CFG_DEFAULT_SPEED_M,
1120                                              HCLGE_CFG_DEFAULT_SPEED_S);
1121         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1122                                             HCLGE_CFG_RSS_SIZE_M,
1123                                             HCLGE_CFG_RSS_SIZE_S);
1124
1125         for (i = 0; i < ETH_ALEN; i++)
1126                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1127
1128         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1129         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1130
1131         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1132                                              HCLGE_CFG_SPEED_ABILITY_M,
1133                                              HCLGE_CFG_SPEED_ABILITY_S);
1134         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1135                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1136                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1137         if (!cfg->umv_space)
1138                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1139 }
1140
1141 /* hclge_get_cfg: query the static parameters from flash
1142  * @hdev: pointer to struct hclge_dev
1143  * @hcfg: the config structure to be filled
1144  */
1145 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1146 {
1147         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1148         struct hclge_cfg_param_cmd *req;
1149         int i, ret;
1150
1151         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1152                 u32 offset = 0;
1153
1154                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1155                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1156                                            true);
1157                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1158                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1159                 /* Len is in units of 4 bytes when sent to hardware */
1160                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1161                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1162                 req->offset = cpu_to_le32(offset);
1163         }
1164
1165         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1166         if (ret) {
1167                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1168                 return ret;
1169         }
1170
1171         hclge_parse_cfg(hcfg, desc);
1172
1173         return 0;
1174 }
1175
1176 static int hclge_get_cap(struct hclge_dev *hdev)
1177 {
1178         int ret;
1179
1180         ret = hclge_query_function_status(hdev);
1181         if (ret) {
1182                 dev_err(&hdev->pdev->dev,
1183                         "query function status error %d.\n", ret);
1184                 return ret;
1185         }
1186
1187         /* get pf resource */
1188         ret = hclge_query_pf_resource(hdev);
1189         if (ret)
1190                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1191
1192         return ret;
1193 }
1194
1195 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1196 {
1197 #define HCLGE_MIN_TX_DESC       64
1198 #define HCLGE_MIN_RX_DESC       64
1199
1200         if (!is_kdump_kernel())
1201                 return;
1202
1203         dev_info(&hdev->pdev->dev,
1204                  "Running kdump kernel. Using minimal resources\n");
1205
1206         /* minimal number of queue pairs equals the number of vports */
1207         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1208         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1209         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1210 }
1211
1212 static int hclge_configure(struct hclge_dev *hdev)
1213 {
1214         struct hclge_cfg cfg;
1215         int ret, i;
1216
1217         ret = hclge_get_cfg(hdev, &cfg);
1218         if (ret) {
1219                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1220                 return ret;
1221         }
1222
1223         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1224         hdev->base_tqp_pid = 0;
1225         hdev->rss_size_max = cfg.rss_size_max;
1226         hdev->rx_buf_len = cfg.rx_buf_len;
1227         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1228         hdev->hw.mac.media_type = cfg.media_type;
1229         hdev->hw.mac.phy_addr = cfg.phy_addr;
1230         hdev->num_tx_desc = cfg.tqp_desc_num;
1231         hdev->num_rx_desc = cfg.tqp_desc_num;
1232         hdev->tm_info.num_pg = 1;
1233         hdev->tc_max = cfg.tc_num;
1234         hdev->tm_info.hw_pfc_map = 0;
1235         hdev->wanted_umv_size = cfg.umv_space;
1236
1237         if (hnae3_dev_fd_supported(hdev)) {
1238                 hdev->fd_en = true;
1239                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1240         }
1241
1242         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1243         if (ret) {
1244                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1245                 return ret;
1246         }
1247
1248         hclge_parse_link_mode(hdev, cfg.speed_ability);
1249
1250         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1251             (hdev->tc_max < 1)) {
1252                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1253                          hdev->tc_max);
1254                 hdev->tc_max = 1;
1255         }
1256
1257         /* Dev does not support DCB */
1258         if (!hnae3_dev_dcb_supported(hdev)) {
1259                 hdev->tc_max = 1;
1260                 hdev->pfc_max = 0;
1261         } else {
1262                 hdev->pfc_max = hdev->tc_max;
1263         }
1264
1265         hdev->tm_info.num_tc = 1;
1266
1267         /* Currently non-contiguous TCs are not supported */
1268         for (i = 0; i < hdev->tm_info.num_tc; i++)
1269                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1270
1271         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1272
1273         hclge_init_kdump_kernel_config(hdev);
1274
1275         return ret;
1276 }
1277
1278 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1279                             int tso_mss_max)
1280 {
1281         struct hclge_cfg_tso_status_cmd *req;
1282         struct hclge_desc desc;
1283         u16 tso_mss;
1284
1285         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1286
1287         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1288
1289         tso_mss = 0;
1290         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1291                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1292         req->tso_mss_min = cpu_to_le16(tso_mss);
1293
1294         tso_mss = 0;
1295         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1296                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1297         req->tso_mss_max = cpu_to_le16(tso_mss);
1298
1299         return hclge_cmd_send(&hdev->hw, &desc, 1);
1300 }
1301
1302 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1303 {
1304         struct hclge_cfg_gro_status_cmd *req;
1305         struct hclge_desc desc;
1306         int ret;
1307
1308         if (!hnae3_dev_gro_supported(hdev))
1309                 return 0;
1310
1311         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1312         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1313
1314         req->gro_en = cpu_to_le16(en ? 1 : 0);
1315
1316         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1317         if (ret)
1318                 dev_err(&hdev->pdev->dev,
1319                         "GRO hardware config cmd failed, ret = %d\n", ret);
1320
1321         return ret;
1322 }
1323
1324 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1325 {
1326         struct hclge_tqp *tqp;
1327         int i;
1328
1329         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1330                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1331         if (!hdev->htqp)
1332                 return -ENOMEM;
1333
1334         tqp = hdev->htqp;
1335
1336         for (i = 0; i < hdev->num_tqps; i++) {
1337                 tqp->dev = &hdev->pdev->dev;
1338                 tqp->index = i;
1339
1340                 tqp->q.ae_algo = &ae_algo;
1341                 tqp->q.buf_size = hdev->rx_buf_len;
1342                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1343                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1344                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1345                         i * HCLGE_TQP_REG_SIZE;
1346
1347                 tqp++;
1348         }
1349
1350         return 0;
1351 }
1352
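/* hclge_map_tqps_to_func - bind one physical TQP to a function (PF or VF).
 * tqp_flag carries the map type (0 for PF, 1 for VF) together with the
 * map-enable bit, and tqp_vid is the queue index as seen by that function.
 */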
1353 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1354                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1355 {
1356         struct hclge_tqp_map_cmd *req;
1357         struct hclge_desc desc;
1358         int ret;
1359
1360         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1361
1362         req = (struct hclge_tqp_map_cmd *)desc.data;
1363         req->tqp_id = cpu_to_le16(tqp_pid);
1364         req->tqp_vf = func_id;
1365         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1366                         1 << HCLGE_TQP_MAP_EN_B;
1367         req->tqp_vid = cpu_to_le16(tqp_vid);
1368
1369         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1370         if (ret)
1371                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1372
1373         return ret;
1374 }
1375
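/* hclge_assign_tqp - grab unused TQPs from the global pool for a vport.
 * The resulting rss_size is capped both by rss_size_max and by the TQPs
 * available per TC; e.g. with 16 allocated TQPs, 4 TCs and rss_size_max
 * of 8, rss_size becomes min(8, 16 / 4) = 4.
 */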
1376 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1377 {
1378         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1379         struct hclge_dev *hdev = vport->back;
1380         int i, alloced;
1381
1382         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1383              alloced < num_tqps; i++) {
1384                 if (!hdev->htqp[i].alloced) {
1385                         hdev->htqp[i].q.handle = &vport->nic;
1386                         hdev->htqp[i].q.tqp_index = alloced;
1387                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1388                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1389                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1390                         hdev->htqp[i].alloced = true;
1391                         alloced++;
1392                 }
1393         }
1394         vport->alloc_tqps = alloced;
1395         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1396                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1397
1398         return 0;
1399 }
1400
1401 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1402                             u16 num_tx_desc, u16 num_rx_desc)
1403
1404 {
1405         struct hnae3_handle *nic = &vport->nic;
1406         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1407         struct hclge_dev *hdev = vport->back;
1408         int ret;
1409
1410         kinfo->num_tx_desc = num_tx_desc;
1411         kinfo->num_rx_desc = num_rx_desc;
1412
1413         kinfo->rx_buf_len = hdev->rx_buf_len;
1414
1415         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1416                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1417         if (!kinfo->tqp)
1418                 return -ENOMEM;
1419
1420         ret = hclge_assign_tqp(vport, num_tqps);
1421         if (ret)
1422                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1423
1424         return ret;
1425 }
1426
1427 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1428                                   struct hclge_vport *vport)
1429 {
1430         struct hnae3_handle *nic = &vport->nic;
1431         struct hnae3_knic_private_info *kinfo;
1432         u16 i;
1433
1434         kinfo = &nic->kinfo;
1435         for (i = 0; i < vport->alloc_tqps; i++) {
1436                 struct hclge_tqp *q =
1437                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1438                 bool is_pf;
1439                 int ret;
1440
1441                 is_pf = !(vport->vport_id);
1442                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1443                                              i, is_pf);
1444                 if (ret)
1445                         return ret;
1446         }
1447
1448         return 0;
1449 }
1450
1451 static int hclge_map_tqp(struct hclge_dev *hdev)
1452 {
1453         struct hclge_vport *vport = hdev->vport;
1454         u16 i, num_vport;
1455
1456         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1457         for (i = 0; i < num_vport; i++) {
1458                 int ret;
1459
1460                 ret = hclge_map_tqp_to_vport(hdev, vport);
1461                 if (ret)
1462                         return ret;
1463
1464                 vport++;
1465         }
1466
1467         return 0;
1468 }
1469
1470 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1471 {
1472         struct hnae3_handle *nic = &vport->nic;
1473         struct hclge_dev *hdev = vport->back;
1474         int ret;
1475
1476         nic->pdev = hdev->pdev;
1477         nic->ae_algo = &ae_algo;
1478         nic->numa_node_mask = hdev->numa_node_mask;
1479
1480         ret = hclge_knic_setup(vport, num_tqps,
1481                                hdev->num_tx_desc, hdev->num_rx_desc);
1482         if (ret)
1483                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1484
1485         return ret;
1486 }
1487
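/* hclge_alloc_vport - create the PF vport plus one vport per VMDq/VF.
 * TQPs are split evenly and the PF (vport 0) also takes the remainder;
 * e.g. with 66 TQPs and 8 vports, vport 0 gets 8 + 66 % 8 = 10 TQPs and
 * the other seven vports get 8 each.
 */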
1488 static int hclge_alloc_vport(struct hclge_dev *hdev)
1489 {
1490         struct pci_dev *pdev = hdev->pdev;
1491         struct hclge_vport *vport;
1492         u32 tqp_main_vport;
1493         u32 tqp_per_vport;
1494         int num_vport, i;
1495         int ret;
1496
1497         /* We need to alloc a vport for the main NIC of the PF */
1498         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1499
1500         if (hdev->num_tqps < num_vport) {
1501                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1502                         hdev->num_tqps, num_vport);
1503                 return -EINVAL;
1504         }
1505
1506         /* Alloc the same number of TQPs for every vport */
1507         tqp_per_vport = hdev->num_tqps / num_vport;
1508         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1509
1510         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1511                              GFP_KERNEL);
1512         if (!vport)
1513                 return -ENOMEM;
1514
1515         hdev->vport = vport;
1516         hdev->num_alloc_vport = num_vport;
1517
1518         if (IS_ENABLED(CONFIG_PCI_IOV))
1519                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1520
1521         for (i = 0; i < num_vport; i++) {
1522                 vport->back = hdev;
1523                 vport->vport_id = i;
1524                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1525                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1526                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1527                 INIT_LIST_HEAD(&vport->vlan_list);
1528                 INIT_LIST_HEAD(&vport->uc_mac_list);
1529                 INIT_LIST_HEAD(&vport->mc_mac_list);
1530
1531                 if (i == 0)
1532                         ret = hclge_vport_setup(vport, tqp_main_vport);
1533                 else
1534                         ret = hclge_vport_setup(vport, tqp_per_vport);
1535                 if (ret) {
1536                         dev_err(&pdev->dev,
1537                                 "vport setup failed for vport %d, %d\n",
1538                                 i, ret);
1539                         return ret;
1540                 }
1541
1542                 vport++;
1543         }
1544
1545         return 0;
1546 }
1547
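/* hclge_cmd_alloc_tx_buff - program the per-TC TX packet buffer sizes.
 * Each size is written in 128 byte units with the update-enable bit set;
 * e.g. a 16 KiB TX buffer is encoded as (0x4000 >> 7) | BIT(15) = 0x8080.
 */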
1548 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1549                                     struct hclge_pkt_buf_alloc *buf_alloc)
1550 {
1551 /* TX buffer size is configured in units of 128 bytes */
1552 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1553 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1554         struct hclge_tx_buff_alloc_cmd *req;
1555         struct hclge_desc desc;
1556         int ret;
1557         u8 i;
1558
1559         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1560
1561         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1562         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1563                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1564
1565                 req->tx_pkt_buff[i] =
1566                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1567                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1568         }
1569
1570         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1571         if (ret)
1572                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1573                         ret);
1574
1575         return ret;
1576 }
1577
1578 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1579                                  struct hclge_pkt_buf_alloc *buf_alloc)
1580 {
1581         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1582
1583         if (ret)
1584                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1585
1586         return ret;
1587 }
1588
1589 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1590 {
1591         int i, cnt = 0;
1592
1593         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1594                 if (hdev->hw_tc_map & BIT(i))
1595                         cnt++;
1596         return cnt;
1597 }
1598
1599 /* Get the number of PFC-enabled TCs, which have a private buffer */
1600 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1601                                   struct hclge_pkt_buf_alloc *buf_alloc)
1602 {
1603         struct hclge_priv_buf *priv;
1604         int i, cnt = 0;
1605
1606         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1607                 priv = &buf_alloc->priv_buf[i];
1608                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1609                     priv->enable)
1610                         cnt++;
1611         }
1612
1613         return cnt;
1614 }
1615
1616 /* Get the number of PFC-disabled TCs, which have a private buffer */
1617 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1618                                      struct hclge_pkt_buf_alloc *buf_alloc)
1619 {
1620         struct hclge_priv_buf *priv;
1621         int i, cnt = 0;
1622
1623         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1624                 priv = &buf_alloc->priv_buf[i];
1625                 if (hdev->hw_tc_map & BIT(i) &&
1626                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1627                     priv->enable)
1628                         cnt++;
1629         }
1630
1631         return cnt;
1632 }
1633
1634 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1635 {
1636         struct hclge_priv_buf *priv;
1637         u32 rx_priv = 0;
1638         int i;
1639
1640         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1641                 priv = &buf_alloc->priv_buf[i];
1642                 if (priv->enable)
1643                         rx_priv += priv->buf_size;
1644         }
1645         return rx_priv;
1646 }
1647
1648 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1649 {
1650         u32 i, total_tx_size = 0;
1651
1652         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1653                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1654
1655         return total_tx_size;
1656 }
1657
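/* hclge_is_rx_buf_ok - check whether the RX buffer left after the private
 * buffers can hold a big enough shared buffer, and if so fill in the
 * shared buffer size plus its self and per-TC thresholds. The shared
 * buffer must cover at least (tc_num + 1) * aligned_mps or the DCB /
 * non-DCB minimum, whichever is larger, rounded up to 256 byte units.
 */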
1658 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1659                                 struct hclge_pkt_buf_alloc *buf_alloc,
1660                                 u32 rx_all)
1661 {
1662         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1663         u32 tc_num = hclge_get_tc_num(hdev);
1664         u32 shared_buf, aligned_mps;
1665         u32 rx_priv;
1666         int i;
1667
1668         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1669
1670         if (hnae3_dev_dcb_supported(hdev))
1671                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1672                                         hdev->dv_buf_size;
1673         else
1674                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1675                                         + hdev->dv_buf_size;
1676
1677         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1678         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1679                              HCLGE_BUF_SIZE_UNIT);
1680
1681         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1682         if (rx_all < rx_priv + shared_std)
1683                 return false;
1684
1685         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1686         buf_alloc->s_buf.buf_size = shared_buf;
1687         if (hnae3_dev_dcb_supported(hdev)) {
1688                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1689                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1690                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1691                                   HCLGE_BUF_SIZE_UNIT);
1692         } else {
1693                 buf_alloc->s_buf.self.high = aligned_mps +
1694                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1695                 buf_alloc->s_buf.self.low = aligned_mps;
1696         }
1697
1698         if (hnae3_dev_dcb_supported(hdev)) {
1699                 if (tc_num)
1700                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1701                 else
1702                         hi_thrd = shared_buf - hdev->dv_buf_size;
1703
1704                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1705                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1706                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1707         } else {
1708                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1709                 lo_thrd = aligned_mps;
1710         }
1711
1712         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1713                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1714                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1715         }
1716
1717         return true;
1718 }
1719
1720 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1721                                 struct hclge_pkt_buf_alloc *buf_alloc)
1722 {
1723         u32 i, total_size;
1724
1725         total_size = hdev->pkt_buf_size;
1726
1727         /* alloc tx buffer for all enabled tc */
1728         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1729                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1730
1731                 if (hdev->hw_tc_map & BIT(i)) {
1732                         if (total_size < hdev->tx_buf_size)
1733                                 return -ENOMEM;
1734
1735                         priv->tx_buf_size = hdev->tx_buf_size;
1736                 } else {
1737                         priv->tx_buf_size = 0;
1738                 }
1739
1740                 total_size -= priv->tx_buf_size;
1741         }
1742
1743         return 0;
1744 }
1745
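/* hclge_rx_buf_calc_all - compute RX private buffer watermarks for every
 * enabled TC, using the larger watermarks when @max is true. For example,
 * with an MPS of 1500 (aligned up to 1536) a PFC-enabled TC with @max set
 * gets wl.low = 1536, wl.high = 3072 and buf_size = 3072 + dv_buf_size.
 */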
1746 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1747                                   struct hclge_pkt_buf_alloc *buf_alloc)
1748 {
1749         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1750         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1751         int i;
1752
1753         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1754                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1755
1756                 priv->enable = 0;
1757                 priv->wl.low = 0;
1758                 priv->wl.high = 0;
1759                 priv->buf_size = 0;
1760
1761                 if (!(hdev->hw_tc_map & BIT(i)))
1762                         continue;
1763
1764                 priv->enable = 1;
1765
1766                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1767                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1768                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1769                                                 HCLGE_BUF_SIZE_UNIT);
1770                 } else {
1771                         priv->wl.low = 0;
1772                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1773                                         aligned_mps;
1774                 }
1775
1776                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1777         }
1778
1779         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1780 }
1781
1782 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1783                                           struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1786         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1787         int i;
1788
1789         /* clear private buffers starting from the last TC */
1790         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1791                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1792
1793                 if (hdev->hw_tc_map & BIT(i) &&
1794                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1795                         /* Clear the private buffer of a TC without PFC */
1796                         priv->wl.low = 0;
1797                         priv->wl.high = 0;
1798                         priv->buf_size = 0;
1799                         priv->enable = 0;
1800                         no_pfc_priv_num--;
1801                 }
1802
1803                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1804                     no_pfc_priv_num == 0)
1805                         break;
1806         }
1807
1808         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1809 }
1810
1811 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1812                                         struct hclge_pkt_buf_alloc *buf_alloc)
1813 {
1814         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1815         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1816         int i;
1817
1818         /* clear private buffers starting from the last TC */
1819         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1820                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1821
1822                 if (hdev->hw_tc_map & BIT(i) &&
1823                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1824                         /* Reduce the number of PFC TCs with a private buffer */
1825                         priv->wl.low = 0;
1826                         priv->enable = 0;
1827                         priv->wl.high = 0;
1828                         priv->buf_size = 0;
1829                         pfc_priv_num--;
1830                 }
1831
1832                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1833                     pfc_priv_num == 0)
1834                         break;
1835         }
1836
1837         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1838 }
1839
1840 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1841  * @hdev: pointer to struct hclge_dev
1842  * @buf_alloc: pointer to buffer calculation data
1843  * @return: 0: calculation successful, negative: fail
1844  */
1845 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1846                                 struct hclge_pkt_buf_alloc *buf_alloc)
1847 {
1848         /* When DCB is not supported, rx private buffer is not allocated. */
1849         if (!hnae3_dev_dcb_supported(hdev)) {
1850                 u32 rx_all = hdev->pkt_buf_size;
1851
1852                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1853                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1854                         return -ENOMEM;
1855
1856                 return 0;
1857         }
1858
1859         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1860                 return 0;
1861
1862         /* try to decrease the buffer size */
1863         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1864                 return 0;
1865
1866         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1867                 return 0;
1868
1869         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1870                 return 0;
1871
1872         return -ENOMEM;
1873 }
1874
1875 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1876                                    struct hclge_pkt_buf_alloc *buf_alloc)
1877 {
1878         struct hclge_rx_priv_buff_cmd *req;
1879         struct hclge_desc desc;
1880         int ret;
1881         int i;
1882
1883         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1884         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1885
1886         /* Alloc private buffer for each TC */
1887         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1888                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1889
1890                 req->buf_num[i] =
1891                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1892                 req->buf_num[i] |=
1893                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1894         }
1895
1896         req->shared_buf =
1897                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1898                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1899
1900         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1901         if (ret)
1902                 dev_err(&hdev->pdev->dev,
1903                         "rx private buffer alloc cmd failed %d\n", ret);
1904
1905         return ret;
1906 }
1907
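/* hclge_rx_priv_wl_config - program the per-TC RX private waterlines.
 * The watermarks are written in hardware buffer units (shifted right by
 * HCLGE_BUF_UNIT_S) across two chained descriptors, covering
 * HCLGE_TC_NUM_ONE_DESC TCs per descriptor.
 */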
1908 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1909                                    struct hclge_pkt_buf_alloc *buf_alloc)
1910 {
1911         struct hclge_rx_priv_wl_buf *req;
1912         struct hclge_priv_buf *priv;
1913         struct hclge_desc desc[2];
1914         int i, j;
1915         int ret;
1916
1917         for (i = 0; i < 2; i++) {
1918                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1919                                            false);
1920                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1921
1922                 /* The first descriptor sets the NEXT bit to 1 */
1923                 if (i == 0)
1924                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1925                 else
1926                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1927
1928                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1929                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1930
1931                         priv = &buf_alloc->priv_buf[idx];
1932                         req->tc_wl[j].high =
1933                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1934                         req->tc_wl[j].high |=
1935                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1936                         req->tc_wl[j].low =
1937                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].low |=
1939                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                 }
1941         }
1942
1943         /* Send 2 descriptors at one time */
1944         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1945         if (ret)
1946                 dev_err(&hdev->pdev->dev,
1947                         "rx private waterline config cmd failed %d\n",
1948                         ret);
1949         return ret;
1950 }
1951
1952 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1953                                     struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1956         struct hclge_rx_com_thrd *req;
1957         struct hclge_desc desc[2];
1958         struct hclge_tc_thrd *tc;
1959         int i, j;
1960         int ret;
1961
1962         for (i = 0; i < 2; i++) {
1963                 hclge_cmd_setup_basic_desc(&desc[i],
1964                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1965                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1966
1967                 /* The first descriptor sets the NEXT bit to 1 */
1968                 if (i == 0)
1969                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1970                 else
1971                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1972
1973                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1974                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1975
1976                         req->com_thrd[j].high =
1977                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1978                         req->com_thrd[j].high |=
1979                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1980                         req->com_thrd[j].low =
1981                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].low |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                 }
1985         }
1986
1987         /* Send 2 descriptors at one time */
1988         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1989         if (ret)
1990                 dev_err(&hdev->pdev->dev,
1991                         "common threshold config cmd failed %d\n", ret);
1992         return ret;
1993 }
1994
1995 static int hclge_common_wl_config(struct hclge_dev *hdev,
1996                                   struct hclge_pkt_buf_alloc *buf_alloc)
1997 {
1998         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1999         struct hclge_rx_com_wl *req;
2000         struct hclge_desc desc;
2001         int ret;
2002
2003         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2004
2005         req = (struct hclge_rx_com_wl *)desc.data;
2006         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2007         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2008
2009         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2010         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2011
2012         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2013         if (ret)
2014                 dev_err(&hdev->pdev->dev,
2015                         "common waterline config cmd failed %d\n", ret);
2016
2017         return ret;
2018 }
2019
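/* hclge_buffer_alloc - top level RX/TX packet buffer setup: calculate and
 * program the TX buffers, then the RX private buffers, and finally the
 * waterline and threshold registers (the private waterline and common
 * threshold steps apply only when DCB is supported).
 */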
2020 int hclge_buffer_alloc(struct hclge_dev *hdev)
2021 {
2022         struct hclge_pkt_buf_alloc *pkt_buf;
2023         int ret;
2024
2025         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2026         if (!pkt_buf)
2027                 return -ENOMEM;
2028
2029         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2030         if (ret) {
2031                 dev_err(&hdev->pdev->dev,
2032                         "could not calc tx buffer size for all TCs %d\n", ret);
2033                 goto out;
2034         }
2035
2036         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2037         if (ret) {
2038                 dev_err(&hdev->pdev->dev,
2039                         "could not alloc tx buffers %d\n", ret);
2040                 goto out;
2041         }
2042
2043         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2044         if (ret) {
2045                 dev_err(&hdev->pdev->dev,
2046                         "could not calc rx priv buffer size for all TCs %d\n",
2047                         ret);
2048                 goto out;
2049         }
2050
2051         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2052         if (ret) {
2053                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2054                         ret);
2055                 goto out;
2056         }
2057
2058         if (hnae3_dev_dcb_supported(hdev)) {
2059                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2060                 if (ret) {
2061                         dev_err(&hdev->pdev->dev,
2062                                 "could not configure rx private waterline %d\n",
2063                                 ret);
2064                         goto out;
2065                 }
2066
2067                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2068                 if (ret) {
2069                         dev_err(&hdev->pdev->dev,
2070                                 "could not configure common threshold %d\n",
2071                                 ret);
2072                         goto out;
2073                 }
2074         }
2075
2076         ret = hclge_common_wl_config(hdev, pkt_buf);
2077         if (ret)
2078                 dev_err(&hdev->pdev->dev,
2079                         "could not configure common waterline %d\n", ret);
2080
2081 out:
2082         kfree(pkt_buf);
2083         return ret;
2084 }
2085
2086 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2087 {
2088         struct hnae3_handle *roce = &vport->roce;
2089         struct hnae3_handle *nic = &vport->nic;
2090
2091         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2092
2093         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2094             vport->back->num_msi_left == 0)
2095                 return -EINVAL;
2096
2097         roce->rinfo.base_vector = vport->back->roce_base_vector;
2098
2099         roce->rinfo.netdev = nic->kinfo.netdev;
2100         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2101
2102         roce->pdev = nic->pdev;
2103         roce->ae_algo = nic->ae_algo;
2104         roce->numa_node_mask = nic->numa_node_mask;
2105
2106         return 0;
2107 }
2108
2109 static int hclge_init_msi(struct hclge_dev *hdev)
2110 {
2111         struct pci_dev *pdev = hdev->pdev;
2112         int vectors;
2113         int i;
2114
2115         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2116                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2117         if (vectors < 0) {
2118                 dev_err(&pdev->dev,
2119                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2120                         vectors);
2121                 return vectors;
2122         }
2123         if (vectors < hdev->num_msi)
2124                 dev_warn(&hdev->pdev->dev,
2125                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2126                          hdev->num_msi, vectors);
2127
2128         hdev->num_msi = vectors;
2129         hdev->num_msi_left = vectors;
2130         hdev->base_msi_vector = pdev->irq;
2131         hdev->roce_base_vector = hdev->base_msi_vector +
2132                                 hdev->roce_base_msix_offset;
2133
2134         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2135                                            sizeof(u16), GFP_KERNEL);
2136         if (!hdev->vector_status) {
2137                 pci_free_irq_vectors(pdev);
2138                 return -ENOMEM;
2139         }
2140
2141         for (i = 0; i < hdev->num_msi; i++)
2142                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2143
2144         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2145                                         sizeof(int), GFP_KERNEL);
2146         if (!hdev->vector_irq) {
2147                 pci_free_irq_vectors(pdev);
2148                 return -ENOMEM;
2149         }
2150
2151         return 0;
2152 }
2153
2154 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2155 {
2156
2157         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2158                 duplex = HCLGE_MAC_FULL;
2159
2160         return duplex;
2161 }
2162
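/* hclge_cfg_mac_speed_dup_hw - program the MAC speed and duplex through
 * the firmware command queue. The speed field encoding used below is:
 * 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5, 10M = 6, 100M = 7.
 */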
2163 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2164                                       u8 duplex)
2165 {
2166         struct hclge_config_mac_speed_dup_cmd *req;
2167         struct hclge_desc desc;
2168         int ret;
2169
2170         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2171
2172         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2173
2174         if (duplex)
2175                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2176
2177         switch (speed) {
2178         case HCLGE_MAC_SPEED_10M:
2179                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2180                                 HCLGE_CFG_SPEED_S, 6);
2181                 break;
2182         case HCLGE_MAC_SPEED_100M:
2183                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2184                                 HCLGE_CFG_SPEED_S, 7);
2185                 break;
2186         case HCLGE_MAC_SPEED_1G:
2187                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2188                                 HCLGE_CFG_SPEED_S, 0);
2189                 break;
2190         case HCLGE_MAC_SPEED_10G:
2191                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2192                                 HCLGE_CFG_SPEED_S, 1);
2193                 break;
2194         case HCLGE_MAC_SPEED_25G:
2195                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2196                                 HCLGE_CFG_SPEED_S, 2);
2197                 break;
2198         case HCLGE_MAC_SPEED_40G:
2199                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2200                                 HCLGE_CFG_SPEED_S, 3);
2201                 break;
2202         case HCLGE_MAC_SPEED_50G:
2203                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2204                                 HCLGE_CFG_SPEED_S, 4);
2205                 break;
2206         case HCLGE_MAC_SPEED_100G:
2207                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2208                                 HCLGE_CFG_SPEED_S, 5);
2209                 break;
2210         default:
2211                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2212                 return -EINVAL;
2213         }
2214
2215         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2216                       1);
2217
2218         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2219         if (ret) {
2220                 dev_err(&hdev->pdev->dev,
2221                         "mac speed/duplex config cmd failed %d.\n", ret);
2222                 return ret;
2223         }
2224
2225         return 0;
2226 }
2227
2228 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2229 {
2230         int ret;
2231
2232         duplex = hclge_check_speed_dup(duplex, speed);
2233         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2234                 return 0;
2235
2236         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2237         if (ret)
2238                 return ret;
2239
2240         hdev->hw.mac.speed = speed;
2241         hdev->hw.mac.duplex = duplex;
2242
2243         return 0;
2244 }
2245
2246 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2247                                      u8 duplex)
2248 {
2249         struct hclge_vport *vport = hclge_get_vport(handle);
2250         struct hclge_dev *hdev = vport->back;
2251
2252         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2253 }
2254
2255 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2256 {
2257         struct hclge_config_auto_neg_cmd *req;
2258         struct hclge_desc desc;
2259         u32 flag = 0;
2260         int ret;
2261
2262         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2263
2264         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2265         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2266         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2267
2268         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2269         if (ret)
2270                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2271                         ret);
2272
2273         return ret;
2274 }
2275
2276 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2277 {
2278         struct hclge_vport *vport = hclge_get_vport(handle);
2279         struct hclge_dev *hdev = vport->back;
2280
2281         if (!hdev->hw.mac.support_autoneg) {
2282                 if (enable) {
2283                         dev_err(&hdev->pdev->dev,
2284                                 "autoneg is not supported by current port\n");
2285                         return -EOPNOTSUPP;
2286                 } else {
2287                         return 0;
2288                 }
2289         }
2290
2291         return hclge_set_autoneg_en(hdev, enable);
2292 }
2293
2294 static int hclge_get_autoneg(struct hnae3_handle *handle)
2295 {
2296         struct hclge_vport *vport = hclge_get_vport(handle);
2297         struct hclge_dev *hdev = vport->back;
2298         struct phy_device *phydev = hdev->hw.mac.phydev;
2299
2300         if (phydev)
2301                 return phydev->autoneg;
2302
2303         return hdev->hw.mac.autoneg;
2304 }
2305
2306 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2307 {
2308         struct hclge_vport *vport = hclge_get_vport(handle);
2309         struct hclge_dev *hdev = vport->back;
2310         int ret;
2311
2312         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2313
2314         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2315         if (ret)
2316                 return ret;
2317         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2318 }
2319
2320 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2321 {
2322         struct hclge_config_fec_cmd *req;
2323         struct hclge_desc desc;
2324         int ret;
2325
2326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2327
2328         req = (struct hclge_config_fec_cmd *)desc.data;
2329         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2330                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2331         if (fec_mode & BIT(HNAE3_FEC_RS))
2332                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2333                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2334         if (fec_mode & BIT(HNAE3_FEC_BASER))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2337
2338         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2339         if (ret)
2340                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2341
2342         return ret;
2343 }
2344
2345 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2346 {
2347         struct hclge_vport *vport = hclge_get_vport(handle);
2348         struct hclge_dev *hdev = vport->back;
2349         struct hclge_mac *mac = &hdev->hw.mac;
2350         int ret;
2351
2352         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2353                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2354                 return -EINVAL;
2355         }
2356
2357         ret = hclge_set_fec_hw(hdev, fec_mode);
2358         if (ret)
2359                 return ret;
2360
2361         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2362         return 0;
2363 }
2364
2365 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2366                           u8 *fec_mode)
2367 {
2368         struct hclge_vport *vport = hclge_get_vport(handle);
2369         struct hclge_dev *hdev = vport->back;
2370         struct hclge_mac *mac = &hdev->hw.mac;
2371
2372         if (fec_ability)
2373                 *fec_ability = mac->fec_ability;
2374         if (fec_mode)
2375                 *fec_mode = mac->fec_mode;
2376 }
2377
2378 static int hclge_mac_init(struct hclge_dev *hdev)
2379 {
2380         struct hclge_mac *mac = &hdev->hw.mac;
2381         int ret;
2382
2383         hdev->support_sfp_query = true;
2384         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2385         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2386                                          hdev->hw.mac.duplex);
2387         if (ret) {
2388                 dev_err(&hdev->pdev->dev,
2389                         "Config mac speed dup fail ret=%d\n", ret);
2390                 return ret;
2391         }
2392
2393         mac->link = 0;
2394
2395         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2396                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2397                 if (ret) {
2398                         dev_err(&hdev->pdev->dev,
2399                                 "Fec mode init fail, ret = %d\n", ret);
2400                         return ret;
2401                 }
2402         }
2403
2404         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2405         if (ret) {
2406                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2407                 return ret;
2408         }
2409
2410         ret = hclge_buffer_alloc(hdev);
2411         if (ret)
2412                 dev_err(&hdev->pdev->dev,
2413                         "allocate buffer fail, ret=%d\n", ret);
2414
2415         return ret;
2416 }
2417
2418 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2419 {
2420         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2421             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2422                 schedule_work(&hdev->mbx_service_task);
2423 }
2424
2425 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2426 {
2427         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2428             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2429                 schedule_work(&hdev->rst_service_task);
2430 }
2431
2432 static void hclge_task_schedule(struct hclge_dev *hdev)
2433 {
2434         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2435             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2436             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2437                 (void)schedule_work(&hdev->service_task);
2438 }
2439
2440 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2441 {
2442         struct hclge_link_status_cmd *req;
2443         struct hclge_desc desc;
2444         int link_status;
2445         int ret;
2446
2447         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2448         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2449         if (ret) {
2450                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2451                         ret);
2452                 return ret;
2453         }
2454
2455         req = (struct hclge_link_status_cmd *)desc.data;
2456         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2457
2458         return !!link_status;
2459 }
2460
2461 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2462 {
2463         int mac_state;
2464         int link_stat;
2465
2466         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2467                 return 0;
2468
2469         mac_state = hclge_get_mac_link_status(hdev);
2470
2471         if (hdev->hw.mac.phydev) {
2472                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2473                         link_stat = mac_state &
2474                                 hdev->hw.mac.phydev->link;
2475                 else
2476                         link_stat = 0;
2477
2478         } else {
2479                 link_stat = mac_state;
2480         }
2481
2482         return !!link_stat;
2483 }
2484
2485 static void hclge_update_link_status(struct hclge_dev *hdev)
2486 {
2487         struct hnae3_client *rclient = hdev->roce_client;
2488         struct hnae3_client *client = hdev->nic_client;
2489         struct hnae3_handle *rhandle;
2490         struct hnae3_handle *handle;
2491         int state;
2492         int i;
2493
2494         if (!client)
2495                 return;
2496         state = hclge_get_mac_phy_link(hdev);
2497         if (state != hdev->hw.mac.link) {
2498                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2499                         handle = &hdev->vport[i].nic;
2500                         client->ops->link_status_change(handle, state);
2501                         hclge_config_mac_tnl_int(hdev, state);
2502                         rhandle = &hdev->vport[i].roce;
2503                         if (rclient && rclient->ops->link_status_change)
2504                                 rclient->ops->link_status_change(rhandle,
2505                                                                  state);
2506                 }
2507                 hdev->hw.mac.link = state;
2508         }
2509 }
2510
2511 static void hclge_update_port_capability(struct hclge_mac *mac)
2512 {
2513         /* update fec ability by speed */
2514         hclge_convert_setting_fec(mac);
2515
2516         /* firmware can not identify the backplane type, so the media type
2517          * read from the configuration helps to deal with it
2518          */
2519         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2520             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2521                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2522         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2523                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2524
2525         if (mac->support_autoneg) {
2526                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2527                 linkmode_copy(mac->advertising, mac->supported);
2528         } else {
2529                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2530                                    mac->supported);
2531                 linkmode_zero(mac->advertising);
2532         }
2533 }
2534
2535 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2536 {
2537         struct hclge_sfp_info_cmd *resp;
2538         struct hclge_desc desc;
2539         int ret;
2540
2541         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2542         resp = (struct hclge_sfp_info_cmd *)desc.data;
2543         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2544         if (ret == -EOPNOTSUPP) {
2545                 dev_warn(&hdev->pdev->dev,
2546                          "IMP does not support getting SFP speed %d\n", ret);
2547                 return ret;
2548         } else if (ret) {
2549                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2550                 return ret;
2551         }
2552
2553         *speed = le32_to_cpu(resp->speed);
2554
2555         return 0;
2556 }
2557
2558 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2559 {
2560         struct hclge_sfp_info_cmd *resp;
2561         struct hclge_desc desc;
2562         int ret;
2563
2564         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2565         resp = (struct hclge_sfp_info_cmd *)desc.data;
2566
2567         resp->query_type = QUERY_ACTIVE_SPEED;
2568
2569         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2570         if (ret == -EOPNOTSUPP) {
2571                 dev_warn(&hdev->pdev->dev,
2572                          "IMP does not support getting SFP info %d\n", ret);
2573                 return ret;
2574         } else if (ret) {
2575                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2576                 return ret;
2577         }
2578
2579         mac->speed = le32_to_cpu(resp->speed);
2580         /* if resp->speed_ability is 0, it means it's old firmware,
2581          * so do not update these params
2582          */
2583         if (resp->speed_ability) {
2584                 mac->module_type = le32_to_cpu(resp->module_type);
2585                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2586                 mac->autoneg = resp->autoneg;
2587                 mac->support_autoneg = resp->autoneg_ability;
2588                 if (!resp->active_fec)
2589                         mac->fec_mode = 0;
2590                 else
2591                         mac->fec_mode = BIT(resp->active_fec);
2592         } else {
2593                 mac->speed_type = QUERY_SFP_SPEED;
2594         }
2595
2596         return 0;
2597 }
2598
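/* hclge_update_port_info - refresh the port speed and related SFP info for
 * non-copper ports. On revision 0x21 and later the full SFP info query is
 * used; older revisions only query the SFP speed and then force full
 * duplex for it.
 */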
2599 static int hclge_update_port_info(struct hclge_dev *hdev)
2600 {
2601         struct hclge_mac *mac = &hdev->hw.mac;
2602         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2603         int ret;
2604
2605         /* get the port info from SFP cmd if not copper port */
2606         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2607                 return 0;
2608
2609         /* if IMP does not support getting SFP/qSFP info, return directly */
2610         if (!hdev->support_sfp_query)
2611                 return 0;
2612
2613         if (hdev->pdev->revision >= 0x21)
2614                 ret = hclge_get_sfp_info(hdev, mac);
2615         else
2616                 ret = hclge_get_sfp_speed(hdev, &speed);
2617
2618         if (ret == -EOPNOTSUPP) {
2619                 hdev->support_sfp_query = false;
2620                 return ret;
2621         } else if (ret) {
2622                 return ret;
2623         }
2624
2625         if (hdev->pdev->revision >= 0x21) {
2626                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2627                         hclge_update_port_capability(mac);
2628                         return 0;
2629                 }
2630                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2631                                                HCLGE_MAC_FULL);
2632         } else {
2633                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2634                         return 0; /* do nothing if no SFP */
2635
2636                 /* must config full duplex for SFP */
2637                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2638         }
2639 }
2640
2641 static int hclge_get_status(struct hnae3_handle *handle)
2642 {
2643         struct hclge_vport *vport = hclge_get_vport(handle);
2644         struct hclge_dev *hdev = vport->back;
2645
2646         hclge_update_link_status(hdev);
2647
2648         return hdev->hw.mac.link;
2649 }
2650
2651 static void hclge_service_timer(struct timer_list *t)
2652 {
2653         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2654
2655         mod_timer(&hdev->service_timer, jiffies + HZ);
2656         hdev->hw_stats.stats_timer++;
2657         hdev->fd_arfs_expire_timer++;
2658         hclge_task_schedule(hdev);
2659 }
2660
2661 static void hclge_service_complete(struct hclge_dev *hdev)
2662 {
2663         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2664
2665         /* Flush memory before next watchdog */
2666         smp_mb__before_atomic();
2667         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2668 }
2669
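/* hclge_check_event_cause - decode the vector0 interrupt source. Events
 * are checked in priority order: IMP reset, then global reset, then MSI-X
 * (hardware error) events, then mailbox (CMDQ RX) events; only the first
 * match is reported per interrupt.
 */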
2670 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2671 {
2672         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2673
2674         /* fetch the events from their corresponding regs */
2675         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2676         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2677         msix_src_reg = hclge_read_dev(&hdev->hw,
2678                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2679
2680         /* Assumption: If by any chance reset and mailbox events are reported
2681          * together then we will only process the reset event in this go and
2682          * will defer the processing of the mailbox events. Since we would not
2683          * have cleared the RX CMDQ event this time, we would receive another
2684          * interrupt from H/W just for the mailbox.
2685          */
2686
2687         /* check for vector0 reset event sources */
2688         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2689                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2690                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2691                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2692                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2693                 hdev->rst_stats.imp_rst_cnt++;
2694                 return HCLGE_VECTOR0_EVENT_RST;
2695         }
2696
2697         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2698                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2699                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2700                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2701                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2702                 hdev->rst_stats.global_rst_cnt++;
2703                 return HCLGE_VECTOR0_EVENT_RST;
2704         }
2705
2706         /* check for vector0 msix event source */
2707         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2708                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2709                         msix_src_reg);
2710                 return HCLGE_VECTOR0_EVENT_ERR;
2711         }
2712
2713         /* check for vector0 mailbox(=CMDQ RX) event source */
2714         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2715                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2716                 *clearval = cmdq_src_reg;
2717                 return HCLGE_VECTOR0_EVENT_MBX;
2718         }
2719
2720         /* print other vector0 event source */
2721         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2722                 cmdq_src_reg, msix_src_reg);
2723         return HCLGE_VECTOR0_EVENT_OTHER;
2724 }
2725
2726 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2727                                     u32 regclr)
2728 {
2729         switch (event_type) {
2730         case HCLGE_VECTOR0_EVENT_RST:
2731                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2732                 break;
2733         case HCLGE_VECTOR0_EVENT_MBX:
2734                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2735                 break;
2736         default:
2737                 break;
2738         }
2739 }
2740
2741 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2742 {
2743         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2744                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2745                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2746                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2747         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2748 }
2749
2750 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2751 {
2752         writel(enable ? 1 : 0, vector->addr);
2753 }
2754
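/* hclge_misc_irq_handle - vector0 interrupt handler. The vector is masked
 * on entry and is unmasked again here only for mailbox events; reset and
 * error events leave it masked for the scheduled tasks to deal with.
 */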
2755 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2756 {
2757         struct hclge_dev *hdev = data;
2758         u32 event_cause;
2759         u32 clearval;
2760
2761         hclge_enable_vector(&hdev->misc_vector, false);
2762         event_cause = hclge_check_event_cause(hdev, &clearval);
2763
2764         /* vector 0 interrupt is shared with reset and mailbox source events. */
2765         switch (event_cause) {
2766         case HCLGE_VECTOR0_EVENT_ERR:
2767                 /* we do not know what type of reset is required now. This can
2768                  * only be decided after we fetch the type of errors which
2769                  * caused this event. Therefore, we will do the following for now:
2770                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2771                  *    type of reset to be used is deferred.
2772                  * 2. Schedule the reset service task.
2773                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2774                  *    will fetch the correct type of reset. This is done by
2775                  *    first decoding the types of errors.
2776                  */
2777                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2778                 /* fall through */
2779         case HCLGE_VECTOR0_EVENT_RST:
2780                 hclge_reset_task_schedule(hdev);
2781                 break;
2782         case HCLGE_VECTOR0_EVENT_MBX:
2783                 /* If we are here then either,
2784                  * 1. we are not handling any mbx task and we are not
2785                  *    scheduled as well,
2786                  *                        OR
2787                  * 2. we could be handling a mbx task but nothing more is
2788                  *    scheduled.
2789                  * In both cases, we should schedule the mbx task as there are
2790                  * more mbx messages reported by this interrupt.
2791                  */
2792                 hclge_mbx_task_schedule(hdev);
2793                 break;
2794         default:
2795                 dev_warn(&hdev->pdev->dev,
2796                          "received unknown or unhandled event of vector0\n");
2797                 break;
2798         }
2799
2800         /* clear the source of interrupt if it is not caused by reset */
2801         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2802                 hclge_clear_event_cause(hdev, event_cause, clearval);
2803                 hclge_enable_vector(&hdev->misc_vector, true);
2804         }
2805
2806         return IRQ_HANDLED;
2807 }
2808
2809 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2810 {
2811         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2812                 dev_warn(&hdev->pdev->dev,
2813                          "vector(vector_id %d) has been freed.\n", vector_id);
2814                 return;
2815         }
2816
2817         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2818         hdev->num_msi_left += 1;
2819         hdev->num_msi_used -= 1;
2820 }
2821
2822 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2823 {
2824         struct hclge_misc_vector *vector = &hdev->misc_vector;
2825
2826         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2827
2828         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2829         hdev->vector_status[0] = 0;
2830
2831         hdev->num_msi_left -= 1;
2832         hdev->num_msi_used += 1;
2833 }
2834
2835 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2836 {
2837         int ret;
2838
2839         hclge_get_misc_vector(hdev);
2840
2841         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2842         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2843                           0, "hclge_misc", hdev);
2844         if (ret) {
2845                 hclge_free_vector(hdev, 0);
2846                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2847                         hdev->misc_vector.vector_irq);
2848         }
2849
2850         return ret;
2851 }
2852
2853 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2854 {
2855         free_irq(hdev->misc_vector.vector_irq, hdev);
2856         hclge_free_vector(hdev, 0);
2857 }
2858
2859 int hclge_notify_client(struct hclge_dev *hdev,
2860                         enum hnae3_reset_notify_type type)
2861 {
2862         struct hnae3_client *client = hdev->nic_client;
2863         u16 i;
2864
2865         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2866             !client)
2867                 return 0;
2868
2869         if (!client->ops->reset_notify)
2870                 return -EOPNOTSUPP;
2871
2872         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2873                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2874                 int ret;
2875
2876                 ret = client->ops->reset_notify(handle, type);
2877                 if (ret) {
2878                         dev_err(&hdev->pdev->dev,
2879                                 "notify nic client failed %d(%d)\n", type, ret);
2880                         return ret;
2881                 }
2882         }
2883
2884         return 0;
2885 }
2886
2887 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2888                                     enum hnae3_reset_notify_type type)
2889 {
2890         struct hnae3_client *client = hdev->roce_client;
2891         int ret = 0;
2892         u16 i;
2893
2894         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2895             !client)
2896                 return 0;
2897
2898         if (!client->ops->reset_notify)
2899                 return -EOPNOTSUPP;
2900
2901         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2902                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2903
2904                 ret = client->ops->reset_notify(handle, type);
2905                 if (ret) {
2906                         dev_err(&hdev->pdev->dev,
2907                                 "notify roce client failed %d(%d)\n",
2908                                 type, ret);
2909                         return ret;
2910                 }
2911         }
2912
2913         return ret;
2914 }
2915
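/* Poll the hardware reset status for the current reset type until the reset
 * completes, checking every 100 ms for up to HCLGE_RESET_WAIT_CNT iterations
 * (about 20 seconds). FLR completion is tracked through the HNAE3_FLR_DONE
 * flag instead of a register bit.
 */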
2916 static int hclge_reset_wait(struct hclge_dev *hdev)
2917 {
2918 #define HCLGE_RESET_WAIT_MS     100
2919 #define HCLGE_RESET_WAIT_CNT    200
2920         u32 val, reg, reg_bit;
2921         u32 cnt = 0;
2922
2923         switch (hdev->reset_type) {
2924         case HNAE3_IMP_RESET:
2925                 reg = HCLGE_GLOBAL_RESET_REG;
2926                 reg_bit = HCLGE_IMP_RESET_BIT;
2927                 break;
2928         case HNAE3_GLOBAL_RESET:
2929                 reg = HCLGE_GLOBAL_RESET_REG;
2930                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2931                 break;
2932         case HNAE3_FUNC_RESET:
2933                 reg = HCLGE_FUN_RST_ING;
2934                 reg_bit = HCLGE_FUN_RST_ING_B;
2935                 break;
2936         case HNAE3_FLR_RESET:
2937                 break;
2938         default:
2939                 dev_err(&hdev->pdev->dev,
2940                         "Wait for unsupported reset type: %d\n",
2941                         hdev->reset_type);
2942                 return -EINVAL;
2943         }
2944
2945         if (hdev->reset_type == HNAE3_FLR_RESET) {
2946                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2947                        cnt++ < HCLGE_RESET_WAIT_CNT)
2948                         msleep(HCLGE_RESET_WAIT_MS);
2949
2950                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2951                         dev_err(&hdev->pdev->dev,
2952                                 "flr wait timeout: %d\n", cnt);
2953                         return -EBUSY;
2954                 }
2955
2956                 return 0;
2957         }
2958
2959         val = hclge_read_dev(&hdev->hw, reg);
2960         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2961                 msleep(HCLGE_RESET_WAIT_MS);
2962                 val = hclge_read_dev(&hdev->hw, reg);
2963                 cnt++;
2964         }
2965
2966         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2967                 dev_warn(&hdev->pdev->dev,
2968                          "Wait for reset timeout: %d\n", hdev->reset_type);
2969                 return -EBUSY;
2970         }
2971
2972         return 0;
2973 }
2974
2975 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2976 {
2977         struct hclge_vf_rst_cmd *req;
2978         struct hclge_desc desc;
2979
2980         req = (struct hclge_vf_rst_cmd *)desc.data;
2981         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2982         req->dest_vfid = func_id;
2983
2984         if (reset)
2985                 req->vf_rst = 0x1;
2986
2987         return hclge_cmd_send(&hdev->hw, &desc, 1);
2988 }
2989
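/* Set or clear FUNC_RST_ING for every VF vport. When asserting the reset,
 * also inform each alive VF so that it can stop IO; the notification may
 * fail (and is only warned about) if the VF driver is not loaded.
 */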
2990 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2991 {
2992         int i;
2993
2994         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2995                 struct hclge_vport *vport = &hdev->vport[i];
2996                 int ret;
2997
2998                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2999                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3000                 if (ret) {
3001                         dev_err(&hdev->pdev->dev,
3002                                 "set vf(%d) rst failed %d!\n",
3003                                 vport->vport_id, ret);
3004                         return ret;
3005                 }
3006
3007                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3008                         continue;
3009
3010                 /* Inform VF to process the reset.
3011                  * hclge_inform_reset_assert_to_vf may fail if VF
3012                  * driver is not loaded.
3013                  */
3014                 ret = hclge_inform_reset_assert_to_vf(vport);
3015                 if (ret)
3016                         dev_warn(&hdev->pdev->dev,
3017                                  "inform reset to vf(%d) failed %d!\n",
3018                                  vport->vport_id, ret);
3019         }
3020
3021         return 0;
3022 }
3023
3024 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3025 {
3026         struct hclge_desc desc;
3027         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3028         int ret;
3029
3030         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3031         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3032         req->fun_reset_vfid = func_id;
3033
3034         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3035         if (ret)
3036                 dev_err(&hdev->pdev->dev,
3037                         "send function reset cmd fail, status =%d\n", ret);
3038
3039         return ret;
3040 }
3041
3042 static void hclge_do_reset(struct hclge_dev *hdev)
3043 {
3044         struct hnae3_handle *handle = &hdev->vport[0].nic;
3045         struct pci_dev *pdev = hdev->pdev;
3046         u32 val;
3047
3048         if (hclge_get_hw_reset_stat(handle)) {
3049                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3050                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3051                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3052                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3053                 return;
3054         }
3055
3056         switch (hdev->reset_type) {
3057         case HNAE3_GLOBAL_RESET:
3058                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3059                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3060                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3061                 dev_info(&pdev->dev, "Global Reset requested\n");
3062                 break;
3063         case HNAE3_FUNC_RESET:
3064                 dev_info(&pdev->dev, "PF Reset requested\n");
3065                 /* schedule again to check later */
3066                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3067                 hclge_reset_task_schedule(hdev);
3068                 break;
3069         case HNAE3_FLR_RESET:
3070                 dev_info(&pdev->dev, "FLR requested\n");
3071                 /* schedule again to check later */
3072                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3073                 hclge_reset_task_schedule(hdev);
3074                 break;
3075         default:
3076                 dev_warn(&pdev->dev,
3077                          "Unsupported reset type: %d\n", hdev->reset_type);
3078                 break;
3079         }
3080 }
3081
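/* Resolve any HNAE3_UNKNOWN_RESET request into a concrete type by decoding
 * the MSI-X errors, then pick the highest-priority reset level set in addr
 * (IMP > global > func > FLR), clearing that bit and any lower-priority bits
 * it supersedes. If a higher-level reset is already in progress, return
 * HNAE3_NONE_RESET so the lower-level request is dropped for now.
 */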
3082 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3083                                                    unsigned long *addr)
3084 {
3085         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3086
3087         /* first, resolve any unknown reset type to the known type(s) */
3088         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3089                 /* we will intentionally ignore any errors from this function
3090                  * as we will end up in *some* reset request in any case
3091                  */
3092                 hclge_handle_hw_msix_error(hdev, addr);
3093                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3094                 /* We deferred the clearing of the error event which caused
3095                  * the interrupt since it was not possible to do that in
3096                  * interrupt context (and this is the reason we introduced the
3097                  * new UNKNOWN reset type). Now that the errors have been
3098                  * handled and cleared in hardware, we can safely enable
3099                  * interrupts. This is an exception to the norm.
3100                  */
3101                 hclge_enable_vector(&hdev->misc_vector, true);
3102         }
3103
3104         /* return the highest priority reset level amongst all */
3105         if (test_bit(HNAE3_IMP_RESET, addr)) {
3106                 rst_level = HNAE3_IMP_RESET;
3107                 clear_bit(HNAE3_IMP_RESET, addr);
3108                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3109                 clear_bit(HNAE3_FUNC_RESET, addr);
3110         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3111                 rst_level = HNAE3_GLOBAL_RESET;
3112                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3113                 clear_bit(HNAE3_FUNC_RESET, addr);
3114         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3115                 rst_level = HNAE3_FUNC_RESET;
3116                 clear_bit(HNAE3_FUNC_RESET, addr);
3117         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3118                 rst_level = HNAE3_FLR_RESET;
3119                 clear_bit(HNAE3_FLR_RESET, addr);
3120         }
3121
3122         if (hdev->reset_type != HNAE3_NONE_RESET &&
3123             rst_level < hdev->reset_type)
3124                 return HNAE3_NONE_RESET;
3125
3126         return rst_level;
3127 }
3128
3129 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3130 {
3131         u32 clearval = 0;
3132
3133         switch (hdev->reset_type) {
3134         case HNAE3_IMP_RESET:
3135                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3136                 break;
3137         case HNAE3_GLOBAL_RESET:
3138                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3139                 break;
3140         default:
3141                 break;
3142         }
3143
3144         if (!clearval)
3145                 return;
3146
3147         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3148         hclge_enable_vector(&hdev->misc_vector, true);
3149 }
3150
3151 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3152 {
3153         int ret = 0;
3154
3155         switch (hdev->reset_type) {
3156         case HNAE3_FUNC_RESET:
3157                 /* fall through */
3158         case HNAE3_FLR_RESET:
3159                 ret = hclge_set_all_vf_rst(hdev, true);
3160                 break;
3161         default:
3162                 break;
3163         }
3164
3165         return ret;
3166 }
3167
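/* Assert the reset itself: send the function reset command, mark the FLR as
 * down, or trigger the IMP reset, depending on the reset type. Command
 * processing is disabled where needed, and a short delay plus a register
 * write informs the hardware that the preparatory work is done.
 */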
3168 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3169 {
3170 #define HCLGE_RESET_SYNC_TIME 100
3171
3172         u32 reg_val;
3173         int ret = 0;
3174
3175         switch (hdev->reset_type) {
3176         case HNAE3_FUNC_RESET:
3177                 /* There is no mechanism for the PF to know if the VF has
3178                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3179                  */
3180                 msleep(HCLGE_RESET_SYNC_TIME);
3181                 ret = hclge_func_reset_cmd(hdev, 0);
3182                 if (ret) {
3183                         dev_err(&hdev->pdev->dev,
3184                                 "asserting function reset fail %d!\n", ret);
3185                         return ret;
3186                 }
3187
3188                 /* After performing PF reset, it is not necessary to do the
3189                  * mailbox handling or send any command to the firmware, because
3190                  * any mailbox handling or command to the firmware is only valid
3191                  * after hclge_cmd_init is called.
3192                  */
3193                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3194                 hdev->rst_stats.pf_rst_cnt++;
3195                 break;
3196         case HNAE3_FLR_RESET:
3197                 /* There is no mechanism for the PF to know if the VF has
3198                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3199                  */
3200                 msleep(HCLGE_RESET_SYNC_TIME);
3201                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3202                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3203                 hdev->rst_stats.flr_rst_cnt++;
3204                 break;
3205         case HNAE3_IMP_RESET:
3206                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3207                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3208                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3209                 break;
3210         default:
3211                 break;
3212         }
3213
3214         /* inform hardware that preparatory work is done */
3215         msleep(HCLGE_RESET_SYNC_TIME);
3216         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3217                         HCLGE_NIC_CMQ_ENABLE);
3218         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3219
3220         return ret;
3221 }
3222
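/* Decide whether a failed reset attempt should be retried. Pending resets
 * and hardware-wait timeouts are rescheduled (return true); an IMP reset in
 * progress, an upgraded retry armed via the reset timer, or too many
 * consecutive failures abandon this attempt (return false).
 */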
3223 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3224 {
3225 #define MAX_RESET_FAIL_CNT 5
3226
3227         if (hdev->reset_pending) {
3228                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3229                          hdev->reset_pending);
3230                 return true;
3231         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3232                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3233                     BIT(HCLGE_IMP_RESET_BIT))) {
3234                 dev_info(&hdev->pdev->dev,
3235                          "reset failed because IMP Reset is pending\n");
3236                 hclge_clear_reset_cause(hdev);
3237                 return false;
3238         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3239                 hdev->reset_fail_cnt++;
3240                 if (is_timeout) {
3241                         set_bit(hdev->reset_type, &hdev->reset_pending);
3242                         dev_info(&hdev->pdev->dev,
3243                                  "re-schedule to wait for hw reset done\n");
3244                         return true;
3245                 }
3246
3247                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3248                 hclge_clear_reset_cause(hdev);
3249                 mod_timer(&hdev->reset_timer,
3250                           jiffies + HCLGE_RESET_INTERVAL);
3251
3252                 return false;
3253         }
3254
3255         hclge_clear_reset_cause(hdev);
3256         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3257         return false;
3258 }
3259
3260 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3261 {
3262         int ret = 0;
3263
3264         switch (hdev->reset_type) {
3265         case HNAE3_FUNC_RESET:
3266                 /* fall through */
3267         case HNAE3_FLR_RESET:
3268                 ret = hclge_set_all_vf_rst(hdev, false);
3269                 break;
3270         default:
3271                 break;
3272         }
3273
3274         return ret;
3275 }
3276
3277 static int hclge_reset_stack(struct hclge_dev *hdev)
3278 {
3279         int ret;
3280
3281         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3282         if (ret)
3283                 return ret;
3284
3285         ret = hclge_reset_ae_dev(hdev->ae_dev);
3286         if (ret)
3287                 return ret;
3288
3289         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3290         if (ret)
3291                 return ret;
3292
3293         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3294 }
3295
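/* Perform the full reset sequence: bring the RoCE and NIC clients down,
 * prepare and assert the reset, wait for the hardware to finish, rebuild the
 * stack and ae device, and finally bring the clients back up. On any failure
 * hclge_reset_err_handle() decides whether the reset task is rescheduled.
 */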
3296 static void hclge_reset(struct hclge_dev *hdev)
3297 {
3298         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3299         bool is_timeout = false;
3300         int ret;
3301
3302         /* Initialize ae_dev reset status as well, in case the enet layer wants
3303          * to know if the device is undergoing reset
3304          */
3305         ae_dev->reset_type = hdev->reset_type;
3306         hdev->rst_stats.reset_cnt++;
3307         /* perform reset of the stack & ae device for a client */
3308         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3309         if (ret)
3310                 goto err_reset;
3311
3312         ret = hclge_reset_prepare_down(hdev);
3313         if (ret)
3314                 goto err_reset;
3315
3316         rtnl_lock();
3317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3318         if (ret)
3319                 goto err_reset_lock;
3320
3321         rtnl_unlock();
3322
3323         ret = hclge_reset_prepare_wait(hdev);
3324         if (ret)
3325                 goto err_reset;
3326
3327         if (hclge_reset_wait(hdev)) {
3328                 is_timeout = true;
3329                 goto err_reset;
3330         }
3331
3332         hdev->rst_stats.hw_reset_done_cnt++;
3333
3334         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3335         if (ret)
3336                 goto err_reset;
3337
3338         rtnl_lock();
3339
3340         ret = hclge_reset_stack(hdev);
3341         if (ret)
3342                 goto err_reset_lock;
3343
3344         hclge_clear_reset_cause(hdev);
3345
3346         ret = hclge_reset_prepare_up(hdev);
3347         if (ret)
3348                 goto err_reset_lock;
3349
3350         rtnl_unlock();
3351
3352         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3353         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3354          * times
3355          */
3356         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3357                 goto err_reset;
3358
3359         rtnl_lock();
3360
3361         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3362         if (ret)
3363                 goto err_reset_lock;
3364
3365         rtnl_unlock();
3366
3367         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3368         if (ret)
3369                 goto err_reset;
3370
3371         hdev->last_reset_time = jiffies;
3372         hdev->reset_fail_cnt = 0;
3373         hdev->rst_stats.reset_done_cnt++;
3374         ae_dev->reset_type = HNAE3_NONE_RESET;
3375         del_timer(&hdev->reset_timer);
3376
3377         return;
3378
3379 err_reset_lock:
3380         rtnl_unlock();
3381 err_reset:
3382         if (hclge_reset_err_handle(hdev, is_timeout))
3383                 hclge_reset_task_schedule(hdev);
3384 }
3385
3386 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3387 {
3388         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3389         struct hclge_dev *hdev = ae_dev->priv;
3390
3391         /* We might end up getting called broadly because of the 2 cases below:
3392          * 1. A recoverable error was conveyed through APEI and the only way
3393          *    to bring back normalcy is to reset.
3394          * 2. A new reset request from the stack due to timeout
3395          *
3396          * For the first case, the error event might not have an ae handle
3397          * available. Check if this is a new reset request and we are not here
3398          * just because the last reset attempt did not succeed and the watchdog
3399          * hit us again. We know this if the last reset request did not occur
3400          * very recently (watchdog timer = 5*HZ, so check after a sufficiently
3401          * large time, say 4*5*HZ). In case of a new request we reset the
3402          * "reset level" to PF reset. And if it is a repeat of the most recent
3403          * reset request, we want to make sure we throttle the reset request.
3404          * Therefore, we will not allow it again before 3*HZ has elapsed.
3405          */
3406         if (!handle)
3407                 handle = &hdev->vport[0].nic;
3408
3409         if (time_before(jiffies, (hdev->last_reset_time +
3410                                   HCLGE_RESET_INTERVAL)))
3411                 return;
3412         else if (hdev->default_reset_request)
3413                 hdev->reset_level =
3414                         hclge_get_reset_level(hdev,
3415                                               &hdev->default_reset_request);
3416         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3417                 hdev->reset_level = HNAE3_FUNC_RESET;
3418
3419         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3420                  hdev->reset_level);
3421
3422         /* request reset & schedule reset task */
3423         set_bit(hdev->reset_level, &hdev->reset_request);
3424         hclge_reset_task_schedule(hdev);
3425
3426         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3427                 hdev->reset_level++;
3428 }
3429
3430 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3431                                         enum hnae3_reset_type rst_type)
3432 {
3433         struct hclge_dev *hdev = ae_dev->priv;
3434
3435         set_bit(rst_type, &hdev->default_reset_request);
3436 }
3437
3438 static void hclge_reset_timer(struct timer_list *t)
3439 {
3440         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3441
3442         dev_info(&hdev->pdev->dev,
3443                  "triggering global reset in reset timer\n");
3444         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3445         hclge_reset_event(hdev->pdev, NULL);
3446 }
3447
3448 static void hclge_reset_subtask(struct hclge_dev *hdev)
3449 {
3450         /* check if there is any ongoing reset in the hardware. This status can
3451          * be checked from reset_pending. If there is, we need to wait for the
3452          * hardware to complete the reset.
3453          *    a. If we are able to figure out in reasonable time that the
3454          *       hardware has fully reset, we can proceed with the driver and
3455          *       client reset.
3456          *    b. else, we can come back later to check this status, so
3457          *       re-schedule now.
3458          */
3459         hdev->last_reset_time = jiffies;
3460         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3461         if (hdev->reset_type != HNAE3_NONE_RESET)
3462                 hclge_reset(hdev);
3463
3464         /* check if we got any *new* reset requests to be honored */
3465         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3466         if (hdev->reset_type != HNAE3_NONE_RESET)
3467                 hclge_do_reset(hdev);
3468
3469         hdev->reset_type = HNAE3_NONE_RESET;
3470 }
3471
3472 static void hclge_reset_service_task(struct work_struct *work)
3473 {
3474         struct hclge_dev *hdev =
3475                 container_of(work, struct hclge_dev, rst_service_task);
3476
3477         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3478                 return;
3479
3480         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3481
3482         hclge_reset_subtask(hdev);
3483
3484         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3485 }
3486
3487 static void hclge_mailbox_service_task(struct work_struct *work)
3488 {
3489         struct hclge_dev *hdev =
3490                 container_of(work, struct hclge_dev, mbx_service_task);
3491
3492         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3493                 return;
3494
3495         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3496
3497         hclge_mbx_handler(hdev);
3498
3499         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3500 }
3501
3502 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3503 {
3504         int i;
3505
3506         /* start from vport 1 because the PF (vport 0) is always alive */
3507         for (i = 1; i < hdev->num_alloc_vport; i++) {
3508                 struct hclge_vport *vport = &hdev->vport[i];
3509
3510                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3511                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3512
3513                 /* If vf is not alive, set its mps to the default value */
3514                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3515                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3516         }
3517 }
3518
3519 static void hclge_service_task(struct work_struct *work)
3520 {
3521         struct hclge_dev *hdev =
3522                 container_of(work, struct hclge_dev, service_task);
3523
3524         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3525                 hclge_update_stats_for_all(hdev);
3526                 hdev->hw_stats.stats_timer = 0;
3527         }
3528
3529         hclge_update_port_info(hdev);
3530         hclge_update_link_status(hdev);
3531         hclge_update_vport_alive(hdev);
3532         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3533                 hclge_rfs_filter_expire(hdev);
3534                 hdev->fd_arfs_expire_timer = 0;
3535         }
3536         hclge_service_complete(hdev);
3537 }
3538
3539 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3540 {
3541         /* VF handle has no client */
3542         if (!handle->client)
3543                 return container_of(handle, struct hclge_vport, nic);
3544         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3545                 return container_of(handle, struct hclge_vport, roce);
3546         else
3547                 return container_of(handle, struct hclge_vport, nic);
3548 }
3549
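/* Allocate up to vector_num unused MSI-X vectors for this vport, skipping
 * vector 0 which is reserved for the misc interrupt. Returns the number of
 * vectors actually allocated and fills in their irq numbers and I/O register
 * addresses.
 */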
3550 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3551                             struct hnae3_vector_info *vector_info)
3552 {
3553         struct hclge_vport *vport = hclge_get_vport(handle);
3554         struct hnae3_vector_info *vector = vector_info;
3555         struct hclge_dev *hdev = vport->back;
3556         int alloc = 0;
3557         int i, j;
3558
3559         vector_num = min(hdev->num_msi_left, vector_num);
3560
3561         for (j = 0; j < vector_num; j++) {
3562                 for (i = 1; i < hdev->num_msi; i++) {
3563                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3564                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3565                                 vector->io_addr = hdev->hw.io_base +
3566                                         HCLGE_VECTOR_REG_BASE +
3567                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3568                                         vport->vport_id *
3569                                         HCLGE_VECTOR_VF_OFFSET;
3570                                 hdev->vector_status[i] = vport->vport_id;
3571                                 hdev->vector_irq[i] = vector->vector;
3572
3573                                 vector++;
3574                                 alloc++;
3575
3576                                 break;
3577                         }
3578                 }
3579         }
3580         hdev->num_msi_left -= alloc;
3581         hdev->num_msi_used += alloc;
3582
3583         return alloc;
3584 }
3585
3586 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3587 {
3588         int i;
3589
3590         for (i = 0; i < hdev->num_msi; i++)
3591                 if (vector == hdev->vector_irq[i])
3592                         return i;
3593
3594         return -EINVAL;
3595 }
3596
3597 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3598 {
3599         struct hclge_vport *vport = hclge_get_vport(handle);
3600         struct hclge_dev *hdev = vport->back;
3601         int vector_id;
3602
3603         vector_id = hclge_get_vector_index(hdev, vector);
3604         if (vector_id < 0) {
3605                 dev_err(&hdev->pdev->dev,
3606                         "Get vector index fail. vector_id =%d\n", vector_id);
3607                 return vector_id;
3608         }
3609
3610         hclge_free_vector(hdev, vector_id);
3611
3612         return 0;
3613 }
3614
3615 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3616 {
3617         return HCLGE_RSS_KEY_SIZE;
3618 }
3619
3620 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3621 {
3622         return HCLGE_RSS_IND_TBL_SIZE;
3623 }
3624
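/* Program the RSS hash algorithm and hash key. The key is longer than one
 * descriptor can carry, so it is written in chunks of HCLGE_RSS_HASH_KEY_NUM
 * bytes, with key_offset selecting which slice of the hardware key each
 * command updates.
 */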
3625 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3626                                   const u8 hfunc, const u8 *key)
3627 {
3628         struct hclge_rss_config_cmd *req;
3629         struct hclge_desc desc;
3630         int key_offset = 0;
3631         int key_counts;
3632         int key_size;
3633         int ret;
3634
3635         key_counts = HCLGE_RSS_KEY_SIZE;
3636         req = (struct hclge_rss_config_cmd *)desc.data;
3637
3638         while (key_counts) {
3639                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3640                                            false);
3641
3642                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3643                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3644
3645                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3646                 memcpy(req->hash_key,
3647                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3648
3649                 key_counts -= key_size;
3650                 key_offset++;
3651                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3652                 if (ret) {
3653                         dev_err(&hdev->pdev->dev,
3654                                 "Configure RSS config fail, status = %d\n",
3655                                 ret);
3656                         return ret;
3657                 }
3658         }
3659         return 0;
3660 }
3661
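/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per descriptor across HCLGE_RSS_CFG_TBL_NUM commands.
 */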
3662 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3663 {
3664         struct hclge_rss_indirection_table_cmd *req;
3665         struct hclge_desc desc;
3666         int i, j;
3667         int ret;
3668
3669         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3670
3671         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3672                 hclge_cmd_setup_basic_desc
3673                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3674
3675                 req->start_table_index =
3676                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3677                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3678
3679                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3680                         req->rss_result[j] =
3681                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3682
3683                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3684                 if (ret) {
3685                         dev_err(&hdev->pdev->dev,
3686                                 "Configure rss indir table fail, status = %d\n",
3687                                 ret);
3688                         return ret;
3689                 }
3690         }
3691         return 0;
3692 }
3693
3694 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3695                                  u16 *tc_size, u16 *tc_offset)
3696 {
3697         struct hclge_rss_tc_mode_cmd *req;
3698         struct hclge_desc desc;
3699         int ret;
3700         int i;
3701
3702         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3703         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3704
3705         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3706                 u16 mode = 0;
3707
3708                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3709                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3710                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3711                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3712                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3713
3714                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3715         }
3716
3717         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3718         if (ret)
3719                 dev_err(&hdev->pdev->dev,
3720                         "Configure rss tc mode fail, status = %d\n", ret);
3721
3722         return ret;
3723 }
3724
3725 static void hclge_get_rss_type(struct hclge_vport *vport)
3726 {
3727         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3728             vport->rss_tuple_sets.ipv4_udp_en ||
3729             vport->rss_tuple_sets.ipv4_sctp_en ||
3730             vport->rss_tuple_sets.ipv6_tcp_en ||
3731             vport->rss_tuple_sets.ipv6_udp_en ||
3732             vport->rss_tuple_sets.ipv6_sctp_en)
3733                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3734         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3735                  vport->rss_tuple_sets.ipv6_fragment_en)
3736                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3737         else
3738                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3739 }
3740
3741 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3742 {
3743         struct hclge_rss_input_tuple_cmd *req;
3744         struct hclge_desc desc;
3745         int ret;
3746
3747         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3748
3749         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3750
3751         /* Get the tuple cfg from pf */
3752         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3753         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3754         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3755         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3756         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3757         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3758         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3759         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3760         hclge_get_rss_type(&hdev->vport[0]);
3761         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3762         if (ret)
3763                 dev_err(&hdev->pdev->dev,
3764                         "Configure rss input fail, status = %d\n", ret);
3765         return ret;
3766 }
3767
3768 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3769                          u8 *key, u8 *hfunc)
3770 {
3771         struct hclge_vport *vport = hclge_get_vport(handle);
3772         int i;
3773
3774         /* Get hash algorithm */
3775         if (hfunc) {
3776                 switch (vport->rss_algo) {
3777                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3778                         *hfunc = ETH_RSS_HASH_TOP;
3779                         break;
3780                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3781                         *hfunc = ETH_RSS_HASH_XOR;
3782                         break;
3783                 default:
3784                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3785                         break;
3786                 }
3787         }
3788
3789         /* Get the RSS Key required by the user */
3790         if (key)
3791                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3792
3793         /* Get the indirection table */
3794         if (indir)
3795                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3796                         indir[i] = vport->rss_indirection_tbl[i];
3797
3798         return 0;
3799 }
3800
3801 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3802                          const  u8 *key, const  u8 hfunc)
3803 {
3804         struct hclge_vport *vport = hclge_get_vport(handle);
3805         struct hclge_dev *hdev = vport->back;
3806         u8 hash_algo;
3807         int ret, i;
3808
3809         /* Set the RSS Hash Key if specified by the user */
3810         if (key) {
3811                 switch (hfunc) {
3812                 case ETH_RSS_HASH_TOP:
3813                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3814                         break;
3815                 case ETH_RSS_HASH_XOR:
3816                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3817                         break;
3818                 case ETH_RSS_HASH_NO_CHANGE:
3819                         hash_algo = vport->rss_algo;
3820                         break;
3821                 default:
3822                         return -EINVAL;
3823                 }
3824
3825                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3826                 if (ret)
3827                         return ret;
3828
3829                 /* Update the shadow RSS key with the user specified key */
3830                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3831                 vport->rss_algo = hash_algo;
3832         }
3833
3834         /* Update the shadow RSS table with user specified qids */
3835         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3836                 vport->rss_indirection_tbl[i] = indir[i];
3837
3838         /* Update the hardware */
3839         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3840 }
3841
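/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * bits (source/destination IP and L4 port). SCTP flows additionally set
 * HCLGE_V_TAG_BIT.
 */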
3842 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3843 {
3844         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3845
3846         if (nfc->data & RXH_L4_B_2_3)
3847                 hash_sets |= HCLGE_D_PORT_BIT;
3848         else
3849                 hash_sets &= ~HCLGE_D_PORT_BIT;
3850
3851         if (nfc->data & RXH_IP_SRC)
3852                 hash_sets |= HCLGE_S_IP_BIT;
3853         else
3854                 hash_sets &= ~HCLGE_S_IP_BIT;
3855
3856         if (nfc->data & RXH_IP_DST)
3857                 hash_sets |= HCLGE_D_IP_BIT;
3858         else
3859                 hash_sets &= ~HCLGE_D_IP_BIT;
3860
3861         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3862                 hash_sets |= HCLGE_V_TAG_BIT;
3863
3864         return hash_sets;
3865 }
3866
3867 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3868                                struct ethtool_rxnfc *nfc)
3869 {
3870         struct hclge_vport *vport = hclge_get_vport(handle);
3871         struct hclge_dev *hdev = vport->back;
3872         struct hclge_rss_input_tuple_cmd *req;
3873         struct hclge_desc desc;
3874         u8 tuple_sets;
3875         int ret;
3876
3877         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3878                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3879                 return -EINVAL;
3880
3881         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3882         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3883
3884         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3885         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3886         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3887         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3888         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3889         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3890         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3891         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3892
3893         tuple_sets = hclge_get_rss_hash_bits(nfc);
3894         switch (nfc->flow_type) {
3895         case TCP_V4_FLOW:
3896                 req->ipv4_tcp_en = tuple_sets;
3897                 break;
3898         case TCP_V6_FLOW:
3899                 req->ipv6_tcp_en = tuple_sets;
3900                 break;
3901         case UDP_V4_FLOW:
3902                 req->ipv4_udp_en = tuple_sets;
3903                 break;
3904         case UDP_V6_FLOW:
3905                 req->ipv6_udp_en = tuple_sets;
3906                 break;
3907         case SCTP_V4_FLOW:
3908                 req->ipv4_sctp_en = tuple_sets;
3909                 break;
3910         case SCTP_V6_FLOW:
3911                 if ((nfc->data & RXH_L4_B_0_1) ||
3912                     (nfc->data & RXH_L4_B_2_3))
3913                         return -EINVAL;
3914
3915                 req->ipv6_sctp_en = tuple_sets;
3916                 break;
3917         case IPV4_FLOW:
3918                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3919                 break;
3920         case IPV6_FLOW:
3921                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3922                 break;
3923         default:
3924                 return -EINVAL;
3925         }
3926
3927         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3928         if (ret) {
3929                 dev_err(&hdev->pdev->dev,
3930                         "Set rss tuple fail, status = %d\n", ret);
3931                 return ret;
3932         }
3933
3934         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3935         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3936         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3937         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3938         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3939         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3940         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3941         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3942         hclge_get_rss_type(vport);
3943         return 0;
3944 }
3945
3946 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3947                                struct ethtool_rxnfc *nfc)
3948 {
3949         struct hclge_vport *vport = hclge_get_vport(handle);
3950         u8 tuple_sets;
3951
3952         nfc->data = 0;
3953
3954         switch (nfc->flow_type) {
3955         case TCP_V4_FLOW:
3956                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3957                 break;
3958         case UDP_V4_FLOW:
3959                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3960                 break;
3961         case TCP_V6_FLOW:
3962                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3963                 break;
3964         case UDP_V6_FLOW:
3965                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3966                 break;
3967         case SCTP_V4_FLOW:
3968                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3969                 break;
3970         case SCTP_V6_FLOW:
3971                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3972                 break;
3973         case IPV4_FLOW:
3974         case IPV6_FLOW:
3975                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3976                 break;
3977         default:
3978                 return -EINVAL;
3979         }
3980
3981         if (!tuple_sets)
3982                 return 0;
3983
3984         if (tuple_sets & HCLGE_D_PORT_BIT)
3985                 nfc->data |= RXH_L4_B_2_3;
3986         if (tuple_sets & HCLGE_S_PORT_BIT)
3987                 nfc->data |= RXH_L4_B_0_1;
3988         if (tuple_sets & HCLGE_D_IP_BIT)
3989                 nfc->data |= RXH_IP_DST;
3990         if (tuple_sets & HCLGE_S_IP_BIT)
3991                 nfc->data |= RXH_IP_SRC;
3992
3993         return 0;
3994 }
3995
3996 static int hclge_get_tc_size(struct hnae3_handle *handle)
3997 {
3998         struct hclge_vport *vport = hclge_get_vport(handle);
3999         struct hclge_dev *hdev = vport->back;
4000
4001         return hdev->rss_size_max;
4002 }
4003
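/* Program vport 0's RSS configuration (indirection table, hash key and
 * algorithm, input tuples) into hardware, then configure the per-TC RSS mode
 * based on the rounded-up power-of-two of the RSS queue size.
 */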
4004 int hclge_rss_init_hw(struct hclge_dev *hdev)
4005 {
4006         struct hclge_vport *vport = hdev->vport;
4007         u8 *rss_indir = vport[0].rss_indirection_tbl;
4008         u16 rss_size = vport[0].alloc_rss_size;
4009         u8 *key = vport[0].rss_hash_key;
4010         u8 hfunc = vport[0].rss_algo;
4011         u16 tc_offset[HCLGE_MAX_TC_NUM];
4012         u16 tc_valid[HCLGE_MAX_TC_NUM];
4013         u16 tc_size[HCLGE_MAX_TC_NUM];
4014         u16 roundup_size;
4015         int i, ret;
4016
4017         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4018         if (ret)
4019                 return ret;
4020
4021         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4022         if (ret)
4023                 return ret;
4024
4025         ret = hclge_set_rss_input_tuple(hdev);
4026         if (ret)
4027                 return ret;
4028
4029         /* Each TC has the same queue size, and the tc_size set to hardware is
4030          * the log2 of the roundup power of two of rss_size; the actual queue
4031          * size is limited by the indirection table.
4032          */
4033         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4034                 dev_err(&hdev->pdev->dev,
4035                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4036                         rss_size);
4037                 return -EINVAL;
4038         }
4039
4040         roundup_size = roundup_pow_of_two(rss_size);
4041         roundup_size = ilog2(roundup_size);
4042
4043         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4044                 tc_valid[i] = 0;
4045
4046                 if (!(hdev->hw_tc_map & BIT(i)))
4047                         continue;
4048
4049                 tc_valid[i] = 1;
4050                 tc_size[i] = roundup_size;
4051                 tc_offset[i] = rss_size * i;
4052         }
4053
4054         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4055 }
4056
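/* Initialize each vport's shadow RSS indirection table by spreading the
 * entries round-robin across the vport's allocated RSS queues.
 */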
4057 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4058 {
4059         struct hclge_vport *vport = hdev->vport;
4060         int i, j;
4061
4062         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4063                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4064                         vport[j].rss_indirection_tbl[i] =
4065                                 i % vport[j].alloc_rss_size;
4066         }
4067 }
4068
4069 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4070 {
4071         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4072         struct hclge_vport *vport = hdev->vport;
4073
4074         if (hdev->pdev->revision >= 0x21)
4075                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4076
4077         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4078                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4079                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4080                 vport[i].rss_tuple_sets.ipv4_udp_en =
4081                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4082                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4083                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4084                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4085                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4086                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4087                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4088                 vport[i].rss_tuple_sets.ipv6_udp_en =
4089                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4090                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4091                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4092                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4093                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4094
4095                 vport[i].rss_algo = rss_algo;
4096
4097                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4098                        HCLGE_RSS_KEY_SIZE);
4099         }
4100
4101         hclge_rss_indir_init_cfg(hdev);
4102 }
4103
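/* Map (en == true) or unmap the rings in ring_chain to/from the interrupt
 * vector vector_id, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries per command descriptor.
 */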
4104 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4105                                 int vector_id, bool en,
4106                                 struct hnae3_ring_chain_node *ring_chain)
4107 {
4108         struct hclge_dev *hdev = vport->back;
4109         struct hnae3_ring_chain_node *node;
4110         struct hclge_desc desc;
4111         struct hclge_ctrl_vector_chain_cmd *req
4112                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4113         enum hclge_cmd_status status;
4114         enum hclge_opcode_type op;
4115         u16 tqp_type_and_id;
4116         int i;
4117
4118         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4119         hclge_cmd_setup_basic_desc(&desc, op, false);
4120         req->int_vector_id = vector_id;
4121
4122         i = 0;
4123         for (node = ring_chain; node; node = node->next) {
4124                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4125                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4126                                 HCLGE_INT_TYPE_S,
4127                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4128                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4129                                 HCLGE_TQP_ID_S, node->tqp_index);
4130                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4131                                 HCLGE_INT_GL_IDX_S,
4132                                 hnae3_get_field(node->int_gl_idx,
4133                                                 HNAE3_RING_GL_IDX_M,
4134                                                 HNAE3_RING_GL_IDX_S));
4135                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4136                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4137                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4138                         req->vfid = vport->vport_id;
4139
4140                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4141                         if (status) {
4142                                 dev_err(&hdev->pdev->dev,
4143                                         "Map TQP fail, status is %d.\n",
4144                                         status);
4145                                 return -EIO;
4146                         }
4147                         i = 0;
4148
4149                         hclge_cmd_setup_basic_desc(&desc,
4150                                                    op,
4151                                                    false);
4152                         req->int_vector_id = vector_id;
4153                 }
4154         }
4155
4156         if (i > 0) {
4157                 req->int_cause_num = i;
4158                 req->vfid = vport->vport_id;
4159                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4160                 if (status) {
4161                         dev_err(&hdev->pdev->dev,
4162                                 "Map TQP fail, status is %d.\n", status);
4163                         return -EIO;
4164                 }
4165         }
4166
4167         return 0;
4168 }
4169
4170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4171                                     int vector,
4172                                     struct hnae3_ring_chain_node *ring_chain)
4173 {
4174         struct hclge_vport *vport = hclge_get_vport(handle);
4175         struct hclge_dev *hdev = vport->back;
4176         int vector_id;
4177
4178         vector_id = hclge_get_vector_index(hdev, vector);
4179         if (vector_id < 0) {
4180                 dev_err(&hdev->pdev->dev,
4181                         "Get vector index fail. vector_id =%d\n", vector_id);
4182                 return vector_id;
4183         }
4184
4185         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4186 }
4187
4188 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4189                                        int vector,
4190                                        struct hnae3_ring_chain_node *ring_chain)
4191 {
4192         struct hclge_vport *vport = hclge_get_vport(handle);
4193         struct hclge_dev *hdev = vport->back;
4194         int vector_id, ret;
4195
4196         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4197                 return 0;
4198
4199         vector_id = hclge_get_vector_index(hdev, vector);
4200         if (vector_id < 0) {
4201                 dev_err(&handle->pdev->dev,
4202                         "Get vector index fail. vector_id = %d\n", vector_id);
4203                 return vector_id;
4204         }
4205
4206         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4207         if (ret)
4208                 dev_err(&handle->pdev->dev,
4209                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4210                         vector_id,
4211                         ret);
4212
4213         return ret;
4214 }
4215
4216 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4217                                struct hclge_promisc_param *param)
4218 {
4219         struct hclge_promisc_cfg_cmd *req;
4220         struct hclge_desc desc;
4221         int ret;
4222
4223         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4224
4225         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4226         req->vf_id = param->vf_id;
4227
4228         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4229          * pdev revision(0x20); newer revisions support them. Setting these
4230          * two fields does not cause an error when the driver sends the
4231          * command to the firmware on revision(0x20).
4232          */
4233         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4234                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4235
4236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4237         if (ret)
4238                 dev_err(&hdev->pdev->dev,
4239                         "Set promisc mode fail, status is %d.\n", ret);
4240
4241         return ret;
4242 }
4243
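/* Fill a struct hclge_promisc_param from the individual enable flags.
 * Typical usage, as in hclge_set_promisc_mode() below:
 *
 *	struct hclge_promisc_param param;
 *
 *	hclge_promisc_param_init(&param, en_uc, en_mc, en_bc, vport->vport_id);
 *	ret = hclge_cmd_set_promisc_mode(hdev, &param);
 */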
4244 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4245                               bool en_mc, bool en_bc, int vport_id)
4246 {
4247         if (!param)
4248                 return;
4249
4250         memset(param, 0, sizeof(struct hclge_promisc_param));
4251         if (en_uc)
4252                 param->enable = HCLGE_PROMISC_EN_UC;
4253         if (en_mc)
4254                 param->enable |= HCLGE_PROMISC_EN_MC;
4255         if (en_bc)
4256                 param->enable |= HCLGE_PROMISC_EN_BC;
4257         param->vf_id = vport_id;
4258 }
4259
4260 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4261                                   bool en_mc_pmc)
4262 {
4263         struct hclge_vport *vport = hclge_get_vport(handle);
4264         struct hclge_dev *hdev = vport->back;
4265         struct hclge_promisc_param param;
4266         bool en_bc_pmc = true;
4267
4268         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4269          * is always bypassed. So broadcast promisc should stay disabled until
4270          * the user enables promisc mode
4271          */
4272         if (handle->pdev->revision == 0x20)
4273                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4274
4275         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4276                                  vport->vport_id);
4277         return hclge_cmd_set_promisc_mode(hdev, &param);
4278 }
4279
4280 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4281 {
4282         struct hclge_get_fd_mode_cmd *req;
4283         struct hclge_desc desc;
4284         int ret;
4285
4286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4287
4288         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4289
4290         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4291         if (ret) {
4292                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4293                 return ret;
4294         }
4295
4296         *fd_mode = req->mode;
4297
4298         return ret;
4299 }
4300
4301 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4302                                    u32 *stage1_entry_num,
4303                                    u32 *stage2_entry_num,
4304                                    u16 *stage1_counter_num,
4305                                    u16 *stage2_counter_num)
4306 {
4307         struct hclge_get_fd_allocation_cmd *req;
4308         struct hclge_desc desc;
4309         int ret;
4310
4311         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4312
4313         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4314
4315         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4316         if (ret) {
4317                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4318                         ret);
4319                 return ret;
4320         }
4321
4322         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4323         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4324         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4325         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4326
4327         return ret;
4328 }
4329
4330 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4331 {
4332         struct hclge_set_fd_key_config_cmd *req;
4333         struct hclge_fd_key_cfg *stage;
4334         struct hclge_desc desc;
4335         int ret;
4336
4337         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4338
4339         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4340         stage = &hdev->fd_cfg.key_cfg[stage_num];
4341         req->stage = stage_num;
4342         req->key_select = stage->key_sel;
4343         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4344         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4345         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4346         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4347         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4348         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4349
4350         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4351         if (ret)
4352                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4353
4354         return ret;
4355 }
4356
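/* Initialize the flow director configuration. The fd mode reported by the
 * firmware selects the key width: DEPTH_2K_WIDTH_400B uses the full 400-bit
 * key, DEPTH_4K_WIDTH_200B halves the key length (the enum names suggest it
 * trades key width for twice the entry depth).
 */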
4357 static int hclge_init_fd_config(struct hclge_dev *hdev)
4358 {
4359 #define LOW_2_WORDS             0x03
4360         struct hclge_fd_key_cfg *key_cfg;
4361         int ret;
4362
4363         if (!hnae3_dev_fd_supported(hdev))
4364                 return 0;
4365
4366         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4367         if (ret)
4368                 return ret;
4369
4370         switch (hdev->fd_cfg.fd_mode) {
4371         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4372                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4373                 break;
4374         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4375                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4376                 break;
4377         default:
4378                 dev_err(&hdev->pdev->dev,
4379                         "Unsupported flow director mode %d\n",
4380                         hdev->fd_cfg.fd_mode);
4381                 return -EOPNOTSUPP;
4382         }
4383
4384         hdev->fd_cfg.proto_support =
4385                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4386                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4387         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4388         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4389         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4390         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4391         key_cfg->outer_sipv6_word_en = 0;
4392         key_cfg->outer_dipv6_word_en = 0;
4393
4394         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4395                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4396                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4397                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4398
4399         /* If the max 400-bit key is used, we can support ether type tuples */
4400         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4401                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4402                 key_cfg->tuple_active |=
4403                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4404         }
4405
4406         /* roce_type is used to filter roce frames
4407          * dst_vport is used to specify the rule
4408          */
4409         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4410
4411         ret = hclge_get_fd_allocation(hdev,
4412                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4413                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4414                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4415                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4416         if (ret)
4417                 return ret;
4418
4419         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4420 }
4421
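/* Write one flow director TCAM entry. The key is split across three chained
 * descriptors (req1/req2/req3 tcam_data). hclge_config_key() calls this twice
 * per rule: once with sel_x == false for key_y and once with sel_x == true
 * for key_x. Passing key == NULL (as hclge_del_fd_entry() does) only updates
 * the entry valid bit.
 */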
4422 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4423                                 int loc, u8 *key, bool is_add)
4424 {
4425         struct hclge_fd_tcam_config_1_cmd *req1;
4426         struct hclge_fd_tcam_config_2_cmd *req2;
4427         struct hclge_fd_tcam_config_3_cmd *req3;
4428         struct hclge_desc desc[3];
4429         int ret;
4430
4431         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4432         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4433         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4434         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4435         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4436
4437         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4438         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4439         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4440
4441         req1->stage = stage;
4442         req1->xy_sel = sel_x ? 1 : 0;
4443         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4444         req1->index = cpu_to_le32(loc);
4445         req1->entry_vld = sel_x ? is_add : 0;
4446
4447         if (key) {
4448                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4449                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4450                        sizeof(req2->tcam_data));
4451                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4452                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4453         }
4454
4455         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4456         if (ret)
4457                 dev_err(&hdev->pdev->dev,
4458                         "config tcam key fail, ret=%d\n",
4459                         ret);
4460
4461         return ret;
4462 }
4463
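/* Configure the action data (AD) for one rule. ad_data is built in two
 * halves: the write-rule-id flag and rule id are packed first and then
 * shifted into the upper 32 bits, after which the action bits (drop,
 * direct queue id, counter and next-stage fields) are packed into the
 * lower 32 bits.
 */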
4464 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4465                               struct hclge_fd_ad_data *action)
4466 {
4467         struct hclge_fd_ad_config_cmd *req;
4468         struct hclge_desc desc;
4469         u64 ad_data = 0;
4470         int ret;
4471
4472         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4473
4474         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4475         req->index = cpu_to_le32(loc);
4476         req->stage = stage;
4477
4478         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4479                       action->write_rule_id_to_bd);
4480         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4481                         action->rule_id);
4482         ad_data <<= 32;
4483         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4484         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4485                       action->forward_to_direct_queue);
4486         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4487                         action->queue_id);
4488         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4489         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4490                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4491         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4492         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4493                         action->counter_id);
4494
4495         req->ad_data = cpu_to_le64(ad_data);
4496         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4497         if (ret)
4498                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4499
4500         return ret;
4501 }
4502
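/* Convert one tuple of a rule into its TCAM key_x/key_y representation via
 * calc_x()/calc_y(). Returns true when the tuple is enabled in the key
 * configuration, so the caller advances the key pointers by the tuple size;
 * for tuples the rule does not use, the key bytes are left zeroed, which
 * presumably acts as a wildcard. Returns false for disabled tuples.
 */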
4503 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4504                                    struct hclge_fd_rule *rule)
4505 {
4506         u16 tmp_x_s, tmp_y_s;
4507         u32 tmp_x_l, tmp_y_l;
4508         int i;
4509
4510         if (rule->unused_tuple & tuple_bit)
4511                 return true;
4512
4513         switch (tuple_bit) {
4514         case 0:
4515                 return false;
4516         case BIT(INNER_DST_MAC):
4517                 for (i = 0; i < ETH_ALEN; i++) {
4518                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4519                                rule->tuples_mask.dst_mac[i]);
4520                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4521                                rule->tuples_mask.dst_mac[i]);
4522                 }
4523
4524                 return true;
4525         case BIT(INNER_SRC_MAC):
4526                 for (i = 0; i < ETH_ALEN; i++) {
4527                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4528                                rule->tuples_mask.src_mac[i]);
4529                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4530                                rule->tuples_mask.src_mac[i]);
4531                 }
4532
4533                 return true;
4534         case BIT(INNER_VLAN_TAG_FST):
4535                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4536                        rule->tuples_mask.vlan_tag1);
4537                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4538                        rule->tuples_mask.vlan_tag1);
4539                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4540                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4541
4542                 return true;
4543         case BIT(INNER_ETH_TYPE):
4544                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4545                        rule->tuples_mask.ether_proto);
4546                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4547                        rule->tuples_mask.ether_proto);
4548                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4549                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4550
4551                 return true;
4552         case BIT(INNER_IP_TOS):
4553                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4554                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4555
4556                 return true;
4557         case BIT(INNER_IP_PROTO):
4558                 calc_x(*key_x, rule->tuples.ip_proto,
4559                        rule->tuples_mask.ip_proto);
4560                 calc_y(*key_y, rule->tuples.ip_proto,
4561                        rule->tuples_mask.ip_proto);
4562
4563                 return true;
4564         case BIT(INNER_SRC_IP):
4565                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4566                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4567                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4568                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4569                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4570                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4571
4572                 return true;
4573         case BIT(INNER_DST_IP):
4574                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4575                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4576                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4577                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4578                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4579                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4580
4581                 return true;
4582         case BIT(INNER_SRC_PORT):
4583                 calc_x(tmp_x_s, rule->tuples.src_port,
4584                        rule->tuples_mask.src_port);
4585                 calc_y(tmp_y_s, rule->tuples.src_port,
4586                        rule->tuples_mask.src_port);
4587                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4588                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4589
4590                 return true;
4591         case BIT(INNER_DST_PORT):
4592                 calc_x(tmp_x_s, rule->tuples.dst_port,
4593                        rule->tuples_mask.dst_port);
4594                 calc_y(tmp_y_s, rule->tuples.dst_port,
4595                        rule->tuples_mask.dst_port);
4596                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4597                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4598
4599                 return true;
4600         default:
4601                 return false;
4602         }
4603 }
4604
4605 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4606                                  u8 vf_id, u8 network_port_id)
4607 {
4608         u32 port_number = 0;
4609
4610         if (port_type == HOST_PORT) {
4611                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4612                                 pf_id);
4613                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4614                                 vf_id);
4615                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4616         } else {
4617                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4618                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4619                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4620         }
4621
4622         return port_number;
4623 }
4624
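/* Pack the active meta data fields (ROCE_TYPE, DST_VPORT) from bit 0 upward
 * using the field lengths in meta_data_key_info[], then shift the result to
 * the top of the 32-bit word so the meta data ends up in the MSB region of
 * the key, matching the layout described before hclge_config_key() below.
 */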
4625 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4626                                        __le32 *key_x, __le32 *key_y,
4627                                        struct hclge_fd_rule *rule)
4628 {
4629         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4630         u8 cur_pos = 0, tuple_size, shift_bits;
4631         int i;
4632
4633         for (i = 0; i < MAX_META_DATA; i++) {
4634                 tuple_size = meta_data_key_info[i].key_length;
4635                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4636
4637                 switch (tuple_bit) {
4638                 case BIT(ROCE_TYPE):
4639                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4640                         cur_pos += tuple_size;
4641                         break;
4642                 case BIT(DST_VPORT):
4643                         port_number = hclge_get_port_number(HOST_PORT, 0,
4644                                                             rule->vf_id, 0);
4645                         hnae3_set_field(meta_data,
4646                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4647                                         cur_pos, port_number);
4648                         cur_pos += tuple_size;
4649                         break;
4650                 default:
4651                         break;
4652                 }
4653         }
4654
4655         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4656         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4657         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4658
4659         *key_x = cpu_to_le32(tmp_x << shift_bits);
4660         *key_y = cpu_to_le32(tmp_y << shift_bits);
4661 }
4662
4663 /* A complete key consists of a meta data key and a tuple key.
4664  * The meta data key is stored in the MSB region, the tuple key in the
4665  * LSB region, and unused bits are filled with 0.
4666  */
4667 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4668                             struct hclge_fd_rule *rule)
4669 {
4670         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4671         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4672         u8 *cur_key_x, *cur_key_y;
4673         int i, ret, tuple_size;
4674         u8 meta_data_region;
4675
4676         memset(key_x, 0, sizeof(key_x));
4677         memset(key_y, 0, sizeof(key_y));
4678         cur_key_x = key_x;
4679         cur_key_y = key_y;
4680
4681         for (i = 0; i < MAX_TUPLE; i++) {
4682                 bool tuple_valid;
4683                 u32 check_tuple;
4684
4685                 tuple_size = tuple_key_info[i].key_length / 8;
4686                 check_tuple = key_cfg->tuple_active & BIT(i);
4687
4688                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4689                                                      cur_key_y, rule);
4690                 if (tuple_valid) {
4691                         cur_key_x += tuple_size;
4692                         cur_key_y += tuple_size;
4693                 }
4694         }
4695
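        /* tuple fields were packed from byte 0 upward; the meta data goes
         * into the last MAX_META_DATA_LENGTH / 8 bytes of the key, and any
         * bytes in between stay zero
         */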
4696         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4697                         MAX_META_DATA_LENGTH / 8;
4698
4699         hclge_fd_convert_meta_data(key_cfg,
4700                                    (__le32 *)(key_x + meta_data_region),
4701                                    (__le32 *)(key_y + meta_data_region),
4702                                    rule);
4703
4704         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4705                                    true);
4706         if (ret) {
4707                 dev_err(&hdev->pdev->dev,
4708                         "fd key_y config fail, loc=%d, ret=%d\n",
4709                         rule->location, ret);
4710                 return ret;
4711         }
4712
4713         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4714                                    true);
4715         if (ret)
4716                 dev_err(&hdev->pdev->dev,
4717                         "fd key_x config fail, loc=%d, ret=%d\n",
4718                         rule->location, ret);
4719         return ret;
4720 }
4721
4722 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4723                                struct hclge_fd_rule *rule)
4724 {
4725         struct hclge_fd_ad_data ad_data;
4726
4727         ad_data.ad_id = rule->location;
4728
4729         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4730                 ad_data.drop_packet = true;
4731                 ad_data.forward_to_direct_queue = false;
4732                 ad_data.queue_id = 0;
4733         } else {
4734                 ad_data.drop_packet = false;
4735                 ad_data.forward_to_direct_queue = true;
4736                 ad_data.queue_id = rule->queue_id;
4737         }
4738
4739         ad_data.use_counter = false;
4740         ad_data.counter_id = 0;
4741
4742         ad_data.use_next_stage = false;
4743         ad_data.next_input_key = 0;
4744
4745         ad_data.write_rule_id_to_bd = true;
4746         ad_data.rule_id = rule->location;
4747
4748         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4749 }
4750
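/* Validate an ethtool flow spec and build a bitmap of the tuples the rule
 * leaves unspecified. The caller stores this bitmap in rule->unused_tuple,
 * and hclge_fd_convert_tuple() later skips writing key data for those
 * tuples.
 */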
4751 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4752                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4753 {
4754         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4755         struct ethtool_usrip4_spec *usr_ip4_spec;
4756         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4757         struct ethtool_usrip6_spec *usr_ip6_spec;
4758         struct ethhdr *ether_spec;
4759
4760         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4761                 return -EINVAL;
4762
4763         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4764                 return -EOPNOTSUPP;
4765
4766         if ((fs->flow_type & FLOW_EXT) &&
4767             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4768                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4769                 return -EOPNOTSUPP;
4770         }
4771
4772         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4773         case SCTP_V4_FLOW:
4774         case TCP_V4_FLOW:
4775         case UDP_V4_FLOW:
4776                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4777                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4778
4779                 if (!tcp_ip4_spec->ip4src)
4780                         *unused |= BIT(INNER_SRC_IP);
4781
4782                 if (!tcp_ip4_spec->ip4dst)
4783                         *unused |= BIT(INNER_DST_IP);
4784
4785                 if (!tcp_ip4_spec->psrc)
4786                         *unused |= BIT(INNER_SRC_PORT);
4787
4788                 if (!tcp_ip4_spec->pdst)
4789                         *unused |= BIT(INNER_DST_PORT);
4790
4791                 if (!tcp_ip4_spec->tos)
4792                         *unused |= BIT(INNER_IP_TOS);
4793
4794                 break;
4795         case IP_USER_FLOW:
4796                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4797                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4798                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4799
4800                 if (!usr_ip4_spec->ip4src)
4801                         *unused |= BIT(INNER_SRC_IP);
4802
4803                 if (!usr_ip4_spec->ip4dst)
4804                         *unused |= BIT(INNER_DST_IP);
4805
4806                 if (!usr_ip4_spec->tos)
4807                         *unused |= BIT(INNER_IP_TOS);
4808
4809                 if (!usr_ip4_spec->proto)
4810                         *unused |= BIT(INNER_IP_PROTO);
4811
4812                 if (usr_ip4_spec->l4_4_bytes)
4813                         return -EOPNOTSUPP;
4814
4815                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4816                         return -EOPNOTSUPP;
4817
4818                 break;
4819         case SCTP_V6_FLOW:
4820         case TCP_V6_FLOW:
4821         case UDP_V6_FLOW:
4822                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4823                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4824                         BIT(INNER_IP_TOS);
4825
4826                 /* check whether the src/dst ip addresses are used */
4827                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4828                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4829                         *unused |= BIT(INNER_SRC_IP);
4830
4831                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4832                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4833                         *unused |= BIT(INNER_DST_IP);
4834
4835                 if (!tcp_ip6_spec->psrc)
4836                         *unused |= BIT(INNER_SRC_PORT);
4837
4838                 if (!tcp_ip6_spec->pdst)
4839                         *unused |= BIT(INNER_DST_PORT);
4840
4841                 if (tcp_ip6_spec->tclass)
4842                         return -EOPNOTSUPP;
4843
4844                 break;
4845         case IPV6_USER_FLOW:
4846                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4847                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4848                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4849                         BIT(INNER_DST_PORT);
4850
4851                 /* check whether the src/dst ip addresses are used */
4852                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4853                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4854                         *unused |= BIT(INNER_SRC_IP);
4855
4856                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4857                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4858                         *unused |= BIT(INNER_DST_IP);
4859
4860                 if (!usr_ip6_spec->l4_proto)
4861                         *unused |= BIT(INNER_IP_PROTO);
4862
4863                 if (usr_ip6_spec->tclass)
4864                         return -EOPNOTSUPP;
4865
4866                 if (usr_ip6_spec->l4_4_bytes)
4867                         return -EOPNOTSUPP;
4868
4869                 break;
4870         case ETHER_FLOW:
4871                 ether_spec = &fs->h_u.ether_spec;
4872                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4873                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4874                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4875
4876                 if (is_zero_ether_addr(ether_spec->h_source))
4877                         *unused |= BIT(INNER_SRC_MAC);
4878
4879                 if (is_zero_ether_addr(ether_spec->h_dest))
4880                         *unused |= BIT(INNER_DST_MAC);
4881
4882                 if (!ether_spec->h_proto)
4883                         *unused |= BIT(INNER_ETH_TYPE);
4884
4885                 break;
4886         default:
4887                 return -EOPNOTSUPP;
4888         }
4889
4890         if ((fs->flow_type & FLOW_EXT)) {
4891                 if (fs->h_ext.vlan_etype)
4892                         return -EOPNOTSUPP;
4893                 if (!fs->h_ext.vlan_tci)
4894                         *unused |= BIT(INNER_VLAN_TAG_FST);
4895
4896                 if (fs->m_ext.vlan_tci) {
4897                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4898                                 return -EINVAL;
4899                 }
4900         } else {
4901                 *unused |= BIT(INNER_VLAN_TAG_FST);
4902         }
4903
4904         if (fs->flow_type & FLOW_MAC_EXT) {
4905                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4906                         return -EOPNOTSUPP;
4907
4908                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4909                         *unused |= BIT(INNER_DST_MAC);
4910                 else
4911                         *unused &= ~(BIT(INNER_DST_MAC));
4912         }
4913
4914         return 0;
4915 }
4916
4917 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4918 {
4919         struct hclge_fd_rule *rule = NULL;
4920         struct hlist_node *node2;
4921
4922         spin_lock_bh(&hdev->fd_rule_lock);
4923         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4924                 if (rule->location >= location)
4925                         break;
4926         }
4927
4928         spin_unlock_bh(&hdev->fd_rule_lock);
4929
4930         return rule && rule->location == location;
4931 }
4932
4933 /* make sure the caller holds fd_rule_lock before calling this function */
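/* The rule list is kept sorted by location: walk to the first entry whose
 * location is >= the requested one; if it matches, the old rule is removed,
 * and for an add the new rule is inserted behind the last smaller entry.
 */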
4934 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4935                                      struct hclge_fd_rule *new_rule,
4936                                      u16 location,
4937                                      bool is_add)
4938 {
4939         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4940         struct hlist_node *node2;
4941
4942         if (is_add && !new_rule)
4943                 return -EINVAL;
4944
4945         hlist_for_each_entry_safe(rule, node2,
4946                                   &hdev->fd_rule_list, rule_node) {
4947                 if (rule->location >= location)
4948                         break;
4949                 parent = rule;
4950         }
4951
4952         if (rule && rule->location == location) {
4953                 hlist_del(&rule->rule_node);
4954                 kfree(rule);
4955                 hdev->hclge_fd_rule_num--;
4956
4957                 if (!is_add) {
4958                         if (!hdev->hclge_fd_rule_num)
4959                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4960                         clear_bit(location, hdev->fd_bmap);
4961
4962                         return 0;
4963                 }
4964         } else if (!is_add) {
4965                 dev_err(&hdev->pdev->dev,
4966                         "delete fail, rule %d does not exist\n",
4967                         location);
4968                 return -EINVAL;
4969         }
4970
4971         INIT_HLIST_NODE(&new_rule->rule_node);
4972
4973         if (parent)
4974                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4975         else
4976                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4977
4978         set_bit(location, hdev->fd_bmap);
4979         hdev->hclge_fd_rule_num++;
4980         hdev->fd_active_type = new_rule->rule_type;
4981
4982         return 0;
4983 }
4984
4985 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4986                               struct ethtool_rx_flow_spec *fs,
4987                               struct hclge_fd_rule *rule)
4988 {
4989         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4990
4991         switch (flow_type) {
4992         case SCTP_V4_FLOW:
4993         case TCP_V4_FLOW:
4994         case UDP_V4_FLOW:
4995                 rule->tuples.src_ip[IPV4_INDEX] =
4996                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4997                 rule->tuples_mask.src_ip[IPV4_INDEX] =
4998                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4999
5000                 rule->tuples.dst_ip[IPV4_INDEX] =
5001                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5002                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5003                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5004
5005                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5006                 rule->tuples_mask.src_port =
5007                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5008
5009                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5010                 rule->tuples_mask.dst_port =
5011                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5012
5013                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5014                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5015
5016                 rule->tuples.ether_proto = ETH_P_IP;
5017                 rule->tuples_mask.ether_proto = 0xFFFF;
5018
5019                 break;
5020         case IP_USER_FLOW:
5021                 rule->tuples.src_ip[IPV4_INDEX] =
5022                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5023                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5024                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5025
5026                 rule->tuples.dst_ip[IPV4_INDEX] =
5027                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5028                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5029                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5030
5031                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5032                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5033
5034                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5035                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5036
5037                 rule->tuples.ether_proto = ETH_P_IP;
5038                 rule->tuples_mask.ether_proto = 0xFFFF;
5039
5040                 break;
5041         case SCTP_V6_FLOW:
5042         case TCP_V6_FLOW:
5043         case UDP_V6_FLOW:
5044                 be32_to_cpu_array(rule->tuples.src_ip,
5045                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5046                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5047                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5048
5049                 be32_to_cpu_array(rule->tuples.dst_ip,
5050                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5051                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5052                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5053
5054                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5055                 rule->tuples_mask.src_port =
5056                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5057
5058                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5059                 rule->tuples_mask.dst_port =
5060                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5061
5062                 rule->tuples.ether_proto = ETH_P_IPV6;
5063                 rule->tuples_mask.ether_proto = 0xFFFF;
5064
5065                 break;
5066         case IPV6_USER_FLOW:
5067                 be32_to_cpu_array(rule->tuples.src_ip,
5068                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5069                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5070                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5071
5072                 be32_to_cpu_array(rule->tuples.dst_ip,
5073                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5074                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5075                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5076
5077                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5078                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5079
5080                 rule->tuples.ether_proto = ETH_P_IPV6;
5081                 rule->tuples_mask.ether_proto = 0xFFFF;
5082
5083                 break;
5084         case ETHER_FLOW:
5085                 ether_addr_copy(rule->tuples.src_mac,
5086                                 fs->h_u.ether_spec.h_source);
5087                 ether_addr_copy(rule->tuples_mask.src_mac,
5088                                 fs->m_u.ether_spec.h_source);
5089
5090                 ether_addr_copy(rule->tuples.dst_mac,
5091                                 fs->h_u.ether_spec.h_dest);
5092                 ether_addr_copy(rule->tuples_mask.dst_mac,
5093                                 fs->m_u.ether_spec.h_dest);
5094
5095                 rule->tuples.ether_proto =
5096                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5097                 rule->tuples_mask.ether_proto =
5098                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5099
5100                 break;
5101         default:
5102                 return -EOPNOTSUPP;
5103         }
5104
5105         switch (flow_type) {
5106         case SCTP_V4_FLOW:
5107         case SCTP_V6_FLOW:
5108                 rule->tuples.ip_proto = IPPROTO_SCTP;
5109                 rule->tuples_mask.ip_proto = 0xFF;
5110                 break;
5111         case TCP_V4_FLOW:
5112         case TCP_V6_FLOW:
5113                 rule->tuples.ip_proto = IPPROTO_TCP;
5114                 rule->tuples_mask.ip_proto = 0xFF;
5115                 break;
5116         case UDP_V4_FLOW:
5117         case UDP_V6_FLOW:
5118                 rule->tuples.ip_proto = IPPROTO_UDP;
5119                 rule->tuples_mask.ip_proto = 0xFF;
5120                 break;
5121         default:
5122                 break;
5123         }
5124
5125         if ((fs->flow_type & FLOW_EXT)) {
5126                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5127                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5128         }
5129
5130         if (fs->flow_type & FLOW_MAC_EXT) {
5131                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5132                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5133         }
5134
5135         return 0;
5136 }
5137
5138 /* make sure the caller holds fd_rule_lock before calling this function */
5139 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5140                                 struct hclge_fd_rule *rule)
5141 {
5142         int ret;
5143
5144         if (!rule) {
5145                 dev_err(&hdev->pdev->dev,
5146                         "The flow director rule is NULL\n");
5147                 return -EINVAL;
5148         }
5149
5150         /* it never fails here, so there is no need to check the return value */
5151         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5152
5153         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5154         if (ret)
5155                 goto clear_rule;
5156
5157         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5158         if (ret)
5159                 goto clear_rule;
5160
5161         return 0;
5162
5163 clear_rule:
5164         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5165         return ret;
5166 }
5167
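/* Add a flow director rule configured through ethtool. fs->ring_cookie
 * either requests a drop action (RX_CLS_FLOW_DISC) or encodes the
 * destination queue and VF, which are extracted below with
 * ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf().
 */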
5168 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5169                               struct ethtool_rxnfc *cmd)
5170 {
5171         struct hclge_vport *vport = hclge_get_vport(handle);
5172         struct hclge_dev *hdev = vport->back;
5173         u16 dst_vport_id = 0, q_index = 0;
5174         struct ethtool_rx_flow_spec *fs;
5175         struct hclge_fd_rule *rule;
5176         u32 unused = 0;
5177         u8 action;
5178         int ret;
5179
5180         if (!hnae3_dev_fd_supported(hdev))
5181                 return -EOPNOTSUPP;
5182
5183         if (!hdev->fd_en) {
5184                 dev_warn(&hdev->pdev->dev,
5185                          "Please enable flow director first\n");
5186                 return -EOPNOTSUPP;
5187         }
5188
5189         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5190
5191         ret = hclge_fd_check_spec(hdev, fs, &unused);
5192         if (ret) {
5193                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5194                 return ret;
5195         }
5196
5197         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5198                 action = HCLGE_FD_ACTION_DROP_PACKET;
5199         } else {
5200                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5201                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5202                 u16 tqps;
5203
5204                 if (vf > hdev->num_req_vfs) {
5205                         dev_err(&hdev->pdev->dev,
5206                                 "Error: vf id (%d) > max vf num (%d)\n",
5207                                 vf, hdev->num_req_vfs);
5208                         return -EINVAL;
5209                 }
5210
5211                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5212                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5213
5214                 if (ring >= tqps) {
5215                         dev_err(&hdev->pdev->dev,
5216                                 "Error: queue id (%d) > max tqp num (%d)\n",
5217                                 ring, tqps - 1);
5218                         return -EINVAL;
5219                 }
5220
5221                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5222                 q_index = ring;
5223         }
5224
5225         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5226         if (!rule)
5227                 return -ENOMEM;
5228
5229         ret = hclge_fd_get_tuple(hdev, fs, rule);
5230         if (ret) {
5231                 kfree(rule);
5232                 return ret;
5233         }
5234
5235         rule->flow_type = fs->flow_type;
5236
5237         rule->location = fs->location;
5238         rule->unused_tuple = unused;
5239         rule->vf_id = dst_vport_id;
5240         rule->queue_id = q_index;
5241         rule->action = action;
5242         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5243
5244         /* to avoid rule conflicts, all arfs rules need to be cleared when the
5245          * user configures a rule via ethtool
5246          */
5247         hclge_clear_arfs_rules(handle);
5248
5249         spin_lock_bh(&hdev->fd_rule_lock);
5250         ret = hclge_fd_config_rule(hdev, rule);
5251
5252         spin_unlock_bh(&hdev->fd_rule_lock);
5253
5254         return ret;
5255 }
5256
5257 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5258                               struct ethtool_rxnfc *cmd)
5259 {
5260         struct hclge_vport *vport = hclge_get_vport(handle);
5261         struct hclge_dev *hdev = vport->back;
5262         struct ethtool_rx_flow_spec *fs;
5263         int ret;
5264
5265         if (!hnae3_dev_fd_supported(hdev))
5266                 return -EOPNOTSUPP;
5267
5268         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5269
5270         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5271                 return -EINVAL;
5272
5273         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5274                 dev_err(&hdev->pdev->dev,
5275                         "Delete fail, rule %d does not exist\n",
5276                         fs->location);
5277                 return -ENOENT;
5278         }
5279
5280         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5281                                    fs->location, NULL, false);
5282         if (ret)
5283                 return ret;
5284
5285         spin_lock_bh(&hdev->fd_rule_lock);
5286         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5287
5288         spin_unlock_bh(&hdev->fd_rule_lock);
5289
5290         return ret;
5291 }
5292
5293 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5294                                      bool clear_list)
5295 {
5296         struct hclge_vport *vport = hclge_get_vport(handle);
5297         struct hclge_dev *hdev = vport->back;
5298         struct hclge_fd_rule *rule;
5299         struct hlist_node *node;
5300         u16 location;
5301
5302         if (!hnae3_dev_fd_supported(hdev))
5303                 return;
5304
5305         spin_lock_bh(&hdev->fd_rule_lock);
5306         for_each_set_bit(location, hdev->fd_bmap,
5307                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5308                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5309                                      NULL, false);
5310
5311         if (clear_list) {
5312                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5313                                           rule_node) {
5314                         hlist_del(&rule->rule_node);
5315                         kfree(rule);
5316                 }
5317                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5318                 hdev->hclge_fd_rule_num = 0;
5319                 bitmap_zero(hdev->fd_bmap,
5320                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5321         }
5322
5323         spin_unlock_bh(&hdev->fd_rule_lock);
5324 }
5325
5326 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5327 {
5328         struct hclge_vport *vport = hclge_get_vport(handle);
5329         struct hclge_dev *hdev = vport->back;
5330         struct hclge_fd_rule *rule;
5331         struct hlist_node *node;
5332         int ret;
5333
5334         /* Return ok here, because reset error handling checks this return
5335          * value. If an error is returned here, the reset process will
5336          * fail.
5337          */
5338         if (!hnae3_dev_fd_supported(hdev))
5339                 return 0;
5340
5341         /* if fd is disabled, it should not be restored during reset */
5342         if (!hdev->fd_en)
5343                 return 0;
5344
5345         spin_lock_bh(&hdev->fd_rule_lock);
5346         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5347                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5348                 if (!ret)
5349                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5350
5351                 if (ret) {
5352                         dev_warn(&hdev->pdev->dev,
5353                                  "Restore rule %d failed, remove it\n",
5354                                  rule->location);
5355                         clear_bit(rule->location, hdev->fd_bmap);
5356                         hlist_del(&rule->rule_node);
5357                         kfree(rule);
5358                         hdev->hclge_fd_rule_num--;
5359                 }
5360         }
5361
5362         if (hdev->hclge_fd_rule_num)
5363                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5364
5365         spin_unlock_bh(&hdev->fd_rule_lock);
5366
5367         return 0;
5368 }
5369
5370 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5371                                  struct ethtool_rxnfc *cmd)
5372 {
5373         struct hclge_vport *vport = hclge_get_vport(handle);
5374         struct hclge_dev *hdev = vport->back;
5375
5376         if (!hnae3_dev_fd_supported(hdev))
5377                 return -EOPNOTSUPP;
5378
5379         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5380         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5381
5382         return 0;
5383 }
5384
5385 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5386                                   struct ethtool_rxnfc *cmd)
5387 {
5388         struct hclge_vport *vport = hclge_get_vport(handle);
5389         struct hclge_fd_rule *rule = NULL;
5390         struct hclge_dev *hdev = vport->back;
5391         struct ethtool_rx_flow_spec *fs;
5392         struct hlist_node *node2;
5393
5394         if (!hnae3_dev_fd_supported(hdev))
5395                 return -EOPNOTSUPP;
5396
5397         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5398
5399         spin_lock_bh(&hdev->fd_rule_lock);
5400
5401         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5402                 if (rule->location >= fs->location)
5403                         break;
5404         }
5405
5406         if (!rule || fs->location != rule->location) {
5407                 spin_unlock_bh(&hdev->fd_rule_lock);
5408
5409                 return -ENOENT;
5410         }
5411
5412         fs->flow_type = rule->flow_type;
5413         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5414         case SCTP_V4_FLOW:
5415         case TCP_V4_FLOW:
5416         case UDP_V4_FLOW:
5417                 fs->h_u.tcp_ip4_spec.ip4src =
5418                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5419                 fs->m_u.tcp_ip4_spec.ip4src =
5420                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5421                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5422
5423                 fs->h_u.tcp_ip4_spec.ip4dst =
5424                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5425                 fs->m_u.tcp_ip4_spec.ip4dst =
5426                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5427                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5428
5429                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5430                 fs->m_u.tcp_ip4_spec.psrc =
5431                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5432                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5433
5434                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5435                 fs->m_u.tcp_ip4_spec.pdst =
5436                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5437                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5438
5439                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5440                 fs->m_u.tcp_ip4_spec.tos =
5441                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5442                                 0 : rule->tuples_mask.ip_tos;
5443
5444                 break;
5445         case IP_USER_FLOW:
5446                 fs->h_u.usr_ip4_spec.ip4src =
5447                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5448                 fs->m_u.usr_ip4_spec.ip4src =
5449                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5450                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5451
5452                 fs->h_u.usr_ip4_spec.ip4dst =
5453                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5454                 fs->m_u.usr_ip4_spec.ip4dst =
5455                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5456                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5457
5458                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5459                 fs->m_u.usr_ip4_spec.tos =
5460                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5461                                 0 : rule->tuples_mask.ip_tos;
5462
5463                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5464                 fs->m_u.usr_ip4_spec.proto =
5465                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5466                                 0 : rule->tuples_mask.ip_proto;
5467
5468                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5469
5470                 break;
5471         case SCTP_V6_FLOW:
5472         case TCP_V6_FLOW:
5473         case UDP_V6_FLOW:
5474                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5475                                   rule->tuples.src_ip, IPV6_SIZE);
5476                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5477                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5478                                sizeof(int) * IPV6_SIZE);
5479                 else
5480                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5481                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5482
5483                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5484                                   rule->tuples.dst_ip, IPV6_SIZE);
5485                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5486                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5487                                sizeof(int) * IPV6_SIZE);
5488                 else
5489                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5490                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5491
5492                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5493                 fs->m_u.tcp_ip6_spec.psrc =
5494                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5495                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5496
5497                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5498                 fs->m_u.tcp_ip6_spec.pdst =
5499                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5500                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5501
5502                 break;
5503         case IPV6_USER_FLOW:
5504                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5505                                   rule->tuples.src_ip, IPV6_SIZE);
5506                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5507                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5508                                sizeof(int) * IPV6_SIZE);
5509                 else
5510                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5511                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5512
5513                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5514                                   rule->tuples.dst_ip, IPV6_SIZE);
5515                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5516                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5517                                sizeof(int) * IPV6_SIZE);
5518                 else
5519                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5520                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5521
5522                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5523                 fs->m_u.usr_ip6_spec.l4_proto =
5524                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5525                                 0 : rule->tuples_mask.ip_proto;
5526
5527                 break;
5528         case ETHER_FLOW:
5529                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5530                                 rule->tuples.src_mac);
5531                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5532                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5533                 else
5534                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5535                                         rule->tuples_mask.src_mac);
5536
5537                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5538                                 rule->tuples.dst_mac);
5539                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5540                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5541                 else
5542                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5543                                         rule->tuples_mask.dst_mac);
5544
5545                 fs->h_u.ether_spec.h_proto =
5546                                 cpu_to_be16(rule->tuples.ether_proto);
5547                 fs->m_u.ether_spec.h_proto =
5548                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5549                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5550
5551                 break;
5552         default:
5553                 spin_unlock_bh(&hdev->fd_rule_lock);
5554                 return -EOPNOTSUPP;
5555         }
5556
5557         if (fs->flow_type & FLOW_EXT) {
5558                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5559                 fs->m_ext.vlan_tci =
5560                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5561                                 cpu_to_be16(VLAN_VID_MASK) :
5562                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5563         }
5564
5565         if (fs->flow_type & FLOW_MAC_EXT) {
5566                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5567                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5568                         eth_zero_addr(fs->m_ext.h_dest);
5569                 else
5570                         ether_addr_copy(fs->m_ext.h_dest,
5571                                         rule->tuples_mask.dst_mac);
5572         }
5573
5574         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5575                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5576         } else {
5577                 u64 vf_id;
5578
5579                 fs->ring_cookie = rule->queue_id;
5580                 vf_id = rule->vf_id;
5581                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5582                 fs->ring_cookie |= vf_id;
5583         }
5584
5585         spin_unlock_bh(&hdev->fd_rule_lock);
5586
5587         return 0;
5588 }
5589
5590 static int hclge_get_all_rules(struct hnae3_handle *handle,
5591                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5592 {
5593         struct hclge_vport *vport = hclge_get_vport(handle);
5594         struct hclge_dev *hdev = vport->back;
5595         struct hclge_fd_rule *rule;
5596         struct hlist_node *node2;
5597         int cnt = 0;
5598
5599         if (!hnae3_dev_fd_supported(hdev))
5600                 return -EOPNOTSUPP;
5601
5602         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5603
5604         spin_lock_bh(&hdev->fd_rule_lock);
5605         hlist_for_each_entry_safe(rule, node2,
5606                                   &hdev->fd_rule_list, rule_node) {
5607                 if (cnt == cmd->rule_cnt) {
5608                         spin_unlock_bh(&hdev->fd_rule_lock);
5609                         return -EMSGSIZE;
5610                 }
5611
5612                 rule_locs[cnt] = rule->location;
5613                 cnt++;
5614         }
5615
5616         spin_unlock_bh(&hdev->fd_rule_lock);
5617
5618         cmd->rule_cnt = cnt;
5619
5620         return 0;
5621 }
5622
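/* Extract the tuples used by the flow director from the dissected flow keys.
 * IPv4 addresses are stored in the last word of the 4-word IP arrays, while
 * IPv6 addresses fill the whole array.
 */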
5623 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5624                                      struct hclge_fd_rule_tuples *tuples)
5625 {
5626         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5627         tuples->ip_proto = fkeys->basic.ip_proto;
5628         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5629
5630         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5631                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5632                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5633         } else {
5634                 memcpy(tuples->src_ip,
5635                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5636                        sizeof(tuples->src_ip));
5637                 memcpy(tuples->dst_ip,
5638                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5639                        sizeof(tuples->dst_ip));
5640         }
5641 }
5642
5643 /* traverse all rules, check whether an existing rule has the same tuples */
5644 static struct hclge_fd_rule *
5645 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5646                           const struct hclge_fd_rule_tuples *tuples)
5647 {
5648         struct hclge_fd_rule *rule = NULL;
5649         struct hlist_node *node;
5650
5651         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5652                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5653                         return rule;
5654         }
5655
5656         return NULL;
5657 }
5658
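/* Build an aRFS rule from the extracted tuples: mask out the tuples aRFS
 * does not match on (MAC addresses, VLAN tag, IP TOS and source port) and
 * derive the flow type from the L3/L4 protocol.
 */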
5659 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5660                                      struct hclge_fd_rule *rule)
5661 {
5662         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5663                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5664                              BIT(INNER_SRC_PORT);
5665         rule->action = 0;
5666         rule->vf_id = 0;
5667         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5668         if (tuples->ether_proto == ETH_P_IP) {
5669                 if (tuples->ip_proto == IPPROTO_TCP)
5670                         rule->flow_type = TCP_V4_FLOW;
5671                 else
5672                         rule->flow_type = UDP_V4_FLOW;
5673         } else {
5674                 if (tuples->ip_proto == IPPROTO_TCP)
5675                         rule->flow_type = TCP_V6_FLOW;
5676                 else
5677                         rule->flow_type = UDP_V6_FLOW;
5678         }
5679         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5680         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5681 }
5682
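/* Called on the aRFS flow steering path: add a new flow director rule for
 * the flow, or update the queue of an existing one. Returns the rule
 * location on success so the stack can track the filter.
 */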
5683 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5684                                       u16 flow_id, struct flow_keys *fkeys)
5685 {
5686         struct hclge_vport *vport = hclge_get_vport(handle);
5687         struct hclge_fd_rule_tuples new_tuples;
5688         struct hclge_dev *hdev = vport->back;
5689         struct hclge_fd_rule *rule;
5690         u16 tmp_queue_id;
5691         u16 bit_id;
5692         int ret;
5693
5694         if (!hnae3_dev_fd_supported(hdev))
5695                 return -EOPNOTSUPP;
5696
5697         memset(&new_tuples, 0, sizeof(new_tuples));
5698         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5699
5700         spin_lock_bh(&hdev->fd_rule_lock);
5701
5702         /* when there is already an fd rule added by the user,
5703          * arfs should not work
5704          */
5705         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5706                 spin_unlock_bh(&hdev->fd_rule_lock);
5707
5708                 return -EOPNOTSUPP;
5709         }
5710
5711         /* check whether a flow director filter already exists for this flow:
5712          * if not, create a new filter for it;
5713          * if a filter exists with a different queue id, modify the filter;
5714          * if a filter exists with the same queue id, do nothing
5715          */
5716         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5717         if (!rule) {
5718                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5719                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5720                         spin_unlock_bh(&hdev->fd_rule_lock);
5721
5722                         return -ENOSPC;
5723                 }
5724
5725                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5726                 if (!rule) {
5727                         spin_unlock_bh(&hdev->fd_rule_lock);
5728
5729                         return -ENOMEM;
5730                 }
5731
5732                 set_bit(bit_id, hdev->fd_bmap);
5733                 rule->location = bit_id;
5734                 rule->flow_id = flow_id;
5735                 rule->queue_id = queue_id;
5736                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5737                 ret = hclge_fd_config_rule(hdev, rule);
5738
5739                 spin_unlock_bh(&hdev->fd_rule_lock);
5740
5741                 if (ret)
5742                         return ret;
5743
5744                 return rule->location;
5745         }
5746
5747         spin_unlock_bh(&hdev->fd_rule_lock);
5748
5749         if (rule->queue_id == queue_id)
5750                 return rule->location;
5751
5752         tmp_queue_id = rule->queue_id;
5753         rule->queue_id = queue_id;
5754         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5755         if (ret) {
5756                 rule->queue_id = tmp_queue_id;
5757                 return ret;
5758         }
5759
5760         return rule->location;
5761 }
5762
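/* Expire aRFS rules that the stack no longer needs: collect expired rules
 * into a local list under the rule lock, then delete the TCAM entries and
 * free the rules after the lock has been released.
 */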
5763 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5764 {
5765 #ifdef CONFIG_RFS_ACCEL
5766         struct hnae3_handle *handle = &hdev->vport[0].nic;
5767         struct hclge_fd_rule *rule;
5768         struct hlist_node *node;
5769         HLIST_HEAD(del_list);
5770
5771         spin_lock_bh(&hdev->fd_rule_lock);
5772         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5773                 spin_unlock_bh(&hdev->fd_rule_lock);
5774                 return;
5775         }
5776         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5777                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5778                                         rule->flow_id, rule->location)) {
5779                         hlist_del_init(&rule->rule_node);
5780                         hlist_add_head(&rule->rule_node, &del_list);
5781                         hdev->hclge_fd_rule_num--;
5782                         clear_bit(rule->location, hdev->fd_bmap);
5783                 }
5784         }
5785         spin_unlock_bh(&hdev->fd_rule_lock);
5786
5787         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5788                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5789                                      rule->location, NULL, false);
5790                 kfree(rule);
5791         }
5792 #endif
5793 }
5794
5795 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5796 {
5797 #ifdef CONFIG_RFS_ACCEL
5798         struct hclge_vport *vport = hclge_get_vport(handle);
5799         struct hclge_dev *hdev = vport->back;
5800
5801         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5802                 hclge_del_all_fd_entries(handle, true);
5803 #endif
5804 }
5805
5806 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5807 {
5808         struct hclge_vport *vport = hclge_get_vport(handle);
5809         struct hclge_dev *hdev = vport->back;
5810
5811         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5812                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5813 }
5814
5815 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5816 {
5817         struct hclge_vport *vport = hclge_get_vport(handle);
5818         struct hclge_dev *hdev = vport->back;
5819
5820         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5821 }
5822
5823 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5824 {
5825         struct hclge_vport *vport = hclge_get_vport(handle);
5826         struct hclge_dev *hdev = vport->back;
5827
5828         return hdev->rst_stats.hw_reset_done_cnt;
5829 }
5830
5831 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5832 {
5833         struct hclge_vport *vport = hclge_get_vport(handle);
5834         struct hclge_dev *hdev = vport->back;
5835         bool clear;
5836
5837         hdev->fd_en = enable;
5838         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5839         if (!enable)
5840                 hclge_del_all_fd_entries(handle, clear);
5841         else
5842                 hclge_restore_fd_entries(handle);
5843 }
5844
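/* Enable or disable MAC TX/RX together with padding, FCS and
 * oversize/undersize handling via the CONFIG_MAC_MODE command.
 */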
5845 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5846 {
5847         struct hclge_desc desc;
5848         struct hclge_config_mac_mode_cmd *req =
5849                 (struct hclge_config_mac_mode_cmd *)desc.data;
5850         u32 loop_en = 0;
5851         int ret;
5852
5853         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5854         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5855         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5856         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5857         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5858         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5865         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5866         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5867         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5868         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5869
5870         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5871         if (ret)
5872                 dev_err(&hdev->pdev->dev,
5873                         "mac enable fail, ret =%d.\n", ret);
5874 }
5875
5876 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5877 {
5878         struct hclge_config_mac_mode_cmd *req;
5879         struct hclge_desc desc;
5880         u32 loop_en;
5881         int ret;
5882
5883         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5884         /* 1 Read out the MAC mode config first */
5885         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5886         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5887         if (ret) {
5888                 dev_err(&hdev->pdev->dev,
5889                         "mac loopback get fail, ret =%d.\n", ret);
5890                 return ret;
5891         }
5892
5893         /* 2 Then setup the loopback flag */
5894         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5895         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5896         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5897         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5898
5899         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5900
5901         /* 3 Config mac work mode with loopback flag
5902          * and its original configuration parameters
5903          */
5904         hclge_cmd_reuse_desc(&desc, false);
5905         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5906         if (ret)
5907                 dev_err(&hdev->pdev->dev,
5908                         "mac loopback set fail, ret =%d.\n", ret);
5909         return ret;
5910 }
5911
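/* Configure serdes serial/parallel internal loopback: issue the loopback
 * command, poll until the firmware reports completion, then wait for the
 * MAC link to reach the expected state.
 */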
5912 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5913                                      enum hnae3_loop loop_mode)
5914 {
5915 #define HCLGE_SERDES_RETRY_MS   10
5916 #define HCLGE_SERDES_RETRY_NUM  100
5917
5918 #define HCLGE_MAC_LINK_STATUS_MS   10
5919 #define HCLGE_MAC_LINK_STATUS_NUM  100
5920 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5921 #define HCLGE_MAC_LINK_STATUS_UP   1
5922
5923         struct hclge_serdes_lb_cmd *req;
5924         struct hclge_desc desc;
5925         int mac_link_ret = 0;
5926         int ret, i = 0;
5927         u8 loop_mode_b;
5928
5929         req = (struct hclge_serdes_lb_cmd *)desc.data;
5930         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5931
5932         switch (loop_mode) {
5933         case HNAE3_LOOP_SERIAL_SERDES:
5934                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5935                 break;
5936         case HNAE3_LOOP_PARALLEL_SERDES:
5937                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5938                 break;
5939         default:
5940                 dev_err(&hdev->pdev->dev,
5941                         "unsupported serdes loopback mode %d\n", loop_mode);
5942                 return -ENOTSUPP;
5943         }
5944
5945         if (en) {
5946                 req->enable = loop_mode_b;
5947                 req->mask = loop_mode_b;
5948                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5949         } else {
5950                 req->mask = loop_mode_b;
5951                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5952         }
5953
5954         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5955         if (ret) {
5956                 dev_err(&hdev->pdev->dev,
5957                         "serdes loopback set fail, ret = %d\n", ret);
5958                 return ret;
5959         }
5960
5961         do {
5962                 msleep(HCLGE_SERDES_RETRY_MS);
5963                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5964                                            true);
5965                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5966                 if (ret) {
5967                         dev_err(&hdev->pdev->dev,
5968                                 "serdes loopback get, ret = %d\n", ret);
5969                         return ret;
5970                 }
5971         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5972                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5973
5974         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5975                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5976                 return -EBUSY;
5977         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5978                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5979                 return -EIO;
5980         }
5981
5982         hclge_cfg_mac_mode(hdev, en);
5983
5984         i = 0;
5985         do {
5986                 /* serdes internal loopback, independent of the network cable. */
5987                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5988                 ret = hclge_get_mac_link_status(hdev);
5989                 if (ret == mac_link_ret)
5990                         return 0;
5991         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5992
5993         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5994
5995         return -EBUSY;
5996 }
5997
5998 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5999                             int stream_id, bool enable)
6000 {
6001         struct hclge_desc desc;
6002         struct hclge_cfg_com_tqp_queue_cmd *req =
6003                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6004         int ret;
6005
6006         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6007         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6008         req->stream_id = cpu_to_le16(stream_id);
6009         req->enable |= enable << HCLGE_TQP_ENABLE_B;
6010
6011         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6012         if (ret)
6013                 dev_err(&hdev->pdev->dev,
6014                         "Tqp enable fail, status =%d.\n", ret);
6015         return ret;
6016 }
6017
6018 static int hclge_set_loopback(struct hnae3_handle *handle,
6019                               enum hnae3_loop loop_mode, bool en)
6020 {
6021         struct hclge_vport *vport = hclge_get_vport(handle);
6022         struct hnae3_knic_private_info *kinfo;
6023         struct hclge_dev *hdev = vport->back;
6024         int i, ret;
6025
6026         switch (loop_mode) {
6027         case HNAE3_LOOP_APP:
6028                 ret = hclge_set_app_loopback(hdev, en);
6029                 break;
6030         case HNAE3_LOOP_SERIAL_SERDES:
6031         case HNAE3_LOOP_PARALLEL_SERDES:
6032                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6033                 break;
6034         default:
6035                 ret = -ENOTSUPP;
6036                 dev_err(&hdev->pdev->dev,
6037                         "loop_mode %d is not supported\n", loop_mode);
6038                 break;
6039         }
6040
6041         if (ret)
6042                 return ret;
6043
6044         kinfo = &vport->nic.kinfo;
6045         for (i = 0; i < kinfo->num_tqps; i++) {
6046                 ret = hclge_tqp_enable(hdev, i, 0, en);
6047                 if (ret)
6048                         return ret;
6049         }
6050
6051         return 0;
6052 }
6053
6054 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6055 {
6056         struct hclge_vport *vport = hclge_get_vport(handle);
6057         struct hnae3_knic_private_info *kinfo;
6058         struct hnae3_queue *queue;
6059         struct hclge_tqp *tqp;
6060         int i;
6061
6062         kinfo = &vport->nic.kinfo;
6063         for (i = 0; i < kinfo->num_tqps; i++) {
6064                 queue = handle->kinfo.tqp[i];
6065                 tqp = container_of(queue, struct hclge_tqp, q);
6066                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6067         }
6068 }
6069
6070 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6071 {
6072         struct hclge_vport *vport = hclge_get_vport(handle);
6073         struct hclge_dev *hdev = vport->back;
6074
6075         if (enable) {
6076                 mod_timer(&hdev->service_timer, jiffies + HZ);
6077         } else {
6078                 del_timer_sync(&hdev->service_timer);
6079                 cancel_work_sync(&hdev->service_task);
6080                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6081         }
6082 }
6083
6084 static int hclge_ae_start(struct hnae3_handle *handle)
6085 {
6086         struct hclge_vport *vport = hclge_get_vport(handle);
6087         struct hclge_dev *hdev = vport->back;
6088
6089         /* mac enable */
6090         hclge_cfg_mac_mode(hdev, true);
6091         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6092         hdev->hw.mac.link = 0;
6093
6094         /* reset tqp stats */
6095         hclge_reset_tqp_stats(handle);
6096
6097         hclge_mac_start_phy(hdev);
6098
6099         return 0;
6100 }
6101
6102 static void hclge_ae_stop(struct hnae3_handle *handle)
6103 {
6104         struct hclge_vport *vport = hclge_get_vport(handle);
6105         struct hclge_dev *hdev = vport->back;
6106         int i;
6107
6108         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6109
6110         hclge_clear_arfs_rules(handle);
6111
6112         /* If it is not a PF reset, the firmware will disable the MAC,
6113          * so we only need to stop the PHY here.
6114          */
6115         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6116             hdev->reset_type != HNAE3_FUNC_RESET) {
6117                 hclge_mac_stop_phy(hdev);
6118                 return;
6119         }
6120
6121         for (i = 0; i < handle->kinfo.num_tqps; i++)
6122                 hclge_reset_tqp(handle, i);
6123
6124         /* Mac disable */
6125         hclge_cfg_mac_mode(hdev, false);
6126
6127         hclge_mac_stop_phy(hdev);
6128
6129         /* reset tqp stats */
6130         hclge_reset_tqp_stats(handle);
6131         hclge_update_link_status(hdev);
6132 }
6133
6134 int hclge_vport_start(struct hclge_vport *vport)
6135 {
6136         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6137         vport->last_active_jiffies = jiffies;
6138         return 0;
6139 }
6140
6141 void hclge_vport_stop(struct hclge_vport *vport)
6142 {
6143         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6144 }
6145
6146 static int hclge_client_start(struct hnae3_handle *handle)
6147 {
6148         struct hclge_vport *vport = hclge_get_vport(handle);
6149
6150         return hclge_vport_start(vport);
6151 }
6152
6153 static void hclge_client_stop(struct hnae3_handle *handle)
6154 {
6155         struct hclge_vport *vport = hclge_get_vport(handle);
6156
6157         hclge_vport_stop(vport);
6158 }
6159
6160 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6161                                          u16 cmdq_resp, u8  resp_code,
6162                                          enum hclge_mac_vlan_tbl_opcode op)
6163 {
6164         struct hclge_dev *hdev = vport->back;
6165         int return_status = -EIO;
6166
6167         if (cmdq_resp) {
6168                 dev_err(&hdev->pdev->dev,
6169                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6170                         cmdq_resp);
6171                 return -EIO;
6172         }
6173
6174         if (op == HCLGE_MAC_VLAN_ADD) {
6175                 if ((!resp_code) || (resp_code == 1)) {
6176                         return_status = 0;
6177                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6178                         return_status = -ENOSPC;
6179                         dev_err(&hdev->pdev->dev,
6180                                 "add mac addr failed for uc_overflow.\n");
6181                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6182                         return_status = -ENOSPC;
6183                         dev_err(&hdev->pdev->dev,
6184                                 "add mac addr failed for mc_overflow.\n");
6185                 } else {
6186                         dev_err(&hdev->pdev->dev,
6187                                 "add mac addr failed for undefined, code=%d.\n",
6188                                 resp_code);
6189                 }
6190         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6191                 if (!resp_code) {
6192                         return_status = 0;
6193                 } else if (resp_code == 1) {
6194                         return_status = -ENOENT;
6195                         dev_dbg(&hdev->pdev->dev,
6196                                 "remove mac addr failed for miss.\n");
6197                 } else {
6198                         dev_err(&hdev->pdev->dev,
6199                                 "remove mac addr failed for undefined, code=%d.\n",
6200                                 resp_code);
6201                 }
6202         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6203                 if (!resp_code) {
6204                         return_status = 0;
6205                 } else if (resp_code == 1) {
6206                         return_status = -ENOENT;
6207                         dev_dbg(&hdev->pdev->dev,
6208                                 "lookup mac addr failed for miss.\n");
6209                 } else {
6210                         dev_err(&hdev->pdev->dev,
6211                                 "lookup mac addr failed for undefined, code=%d.\n",
6212                                 resp_code);
6213                 }
6214         } else {
6215                 return_status = -EINVAL;
6216                 dev_err(&hdev->pdev->dev,
6217                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6218                         op);
6219         }
6220
6221         return return_status;
6222 }
6223
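/* Set or clear the bit of a function id in the mac_vlan table entry:
 * the first 192 function ids are kept in desc[1], the remainder in desc[2].
 */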
6224 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6225 {
6226 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6227
6228         int word_num;
6229         int bit_num;
6230
6231         if (vfid > 255 || vfid < 0)
6232                 return -EIO;
6233
6234         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6235                 word_num = vfid / 32;
6236                 bit_num  = vfid % 32;
6237                 if (clr)
6238                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6239                 else
6240                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6241         } else {
6242                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6243                 bit_num  = vfid % 32;
6244                 if (clr)
6245                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6246                 else
6247                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6248         }
6249
6250         return 0;
6251 }
6252
6253 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6254 {
6255 #define HCLGE_DESC_NUMBER 3
6256 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6257         int i, j;
6258
6259         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6260                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6261                         if (desc[i].data[j])
6262                                 return false;
6263
6264         return true;
6265 }
6266
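/* Pack the 6-byte MAC address into the hi32/lo16 fields of a mac_vlan
 * table entry and mark the entry as multicast when requested.
 */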
6267 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6268                                    const u8 *addr, bool is_mc)
6269 {
6270         const unsigned char *mac_addr = addr;
6271         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6272                        (mac_addr[0]) | (mac_addr[1] << 8);
6273         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6274
6275         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6276         if (is_mc) {
6277                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6278                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6279         }
6280
6281         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6282         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6283 }
6284
6285 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6286                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6287 {
6288         struct hclge_dev *hdev = vport->back;
6289         struct hclge_desc desc;
6290         u8 resp_code;
6291         u16 retval;
6292         int ret;
6293
6294         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6295
6296         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6297
6298         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6299         if (ret) {
6300                 dev_err(&hdev->pdev->dev,
6301                         "del mac addr failed for cmd_send, ret =%d.\n",
6302                         ret);
6303                 return ret;
6304         }
6305         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6306         retval = le16_to_cpu(desc.retval);
6307
6308         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6309                                              HCLGE_MAC_VLAN_REMOVE);
6310 }
6311
6312 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6313                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6314                                      struct hclge_desc *desc,
6315                                      bool is_mc)
6316 {
6317         struct hclge_dev *hdev = vport->back;
6318         u8 resp_code;
6319         u16 retval;
6320         int ret;
6321
6322         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6323         if (is_mc) {
6324                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6325                 memcpy(desc[0].data,
6326                        req,
6327                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6328                 hclge_cmd_setup_basic_desc(&desc[1],
6329                                            HCLGE_OPC_MAC_VLAN_ADD,
6330                                            true);
6331                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6332                 hclge_cmd_setup_basic_desc(&desc[2],
6333                                            HCLGE_OPC_MAC_VLAN_ADD,
6334                                            true);
6335                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6336         } else {
6337                 memcpy(desc[0].data,
6338                        req,
6339                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6340                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6341         }
6342         if (ret) {
6343                 dev_err(&hdev->pdev->dev,
6344                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6345                         ret);
6346                 return ret;
6347         }
6348         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6349         retval = le16_to_cpu(desc[0].retval);
6350
6351         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6352                                              HCLGE_MAC_VLAN_LKUP);
6353 }
6354
6355 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6356                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6357                                   struct hclge_desc *mc_desc)
6358 {
6359         struct hclge_dev *hdev = vport->back;
6360         int cfg_status;
6361         u8 resp_code;
6362         u16 retval;
6363         int ret;
6364
6365         if (!mc_desc) {
6366                 struct hclge_desc desc;
6367
6368                 hclge_cmd_setup_basic_desc(&desc,
6369                                            HCLGE_OPC_MAC_VLAN_ADD,
6370                                            false);
6371                 memcpy(desc.data, req,
6372                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6373                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6374                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6375                 retval = le16_to_cpu(desc.retval);
6376
6377                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6378                                                            resp_code,
6379                                                            HCLGE_MAC_VLAN_ADD);
6380         } else {
6381                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6382                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6383                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6384                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6385                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6386                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6387                 memcpy(mc_desc[0].data, req,
6388                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6389                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6390                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6391                 retval = le16_to_cpu(mc_desc[0].retval);
6392
6393                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6394                                                            resp_code,
6395                                                            HCLGE_MAC_VLAN_ADD);
6396         }
6397
6398         if (ret) {
6399                 dev_err(&hdev->pdev->dev,
6400                         "add mac addr failed for cmd_send, ret =%d.\n",
6401                         ret);
6402                 return ret;
6403         }
6404
6405         return cfg_status;
6406 }
6407
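/* Allocate unicast MAC VLAN (UMV) table space from the firmware and split
 * it into a private quota per function plus a shared pool for the PF and
 * its VFs.
 */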
6408 static int hclge_init_umv_space(struct hclge_dev *hdev)
6409 {
6410         u16 allocated_size = 0;
6411         int ret;
6412
6413         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6414                                   true);
6415         if (ret)
6416                 return ret;
6417
6418         if (allocated_size < hdev->wanted_umv_size)
6419                 dev_warn(&hdev->pdev->dev,
6420                          "Alloc umv space failed, want %d, get %d\n",
6421                          hdev->wanted_umv_size, allocated_size);
6422
6423         mutex_init(&hdev->umv_mutex);
6424         hdev->max_umv_size = allocated_size;
6425         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6426          * reserve some unicast mac vlan table entries shared by the pf
6427          * and its vfs.
6428          */
6429         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6430         hdev->share_umv_size = hdev->priv_umv_size +
6431                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6432
6433         return 0;
6434 }
6435
6436 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6437 {
6438         int ret;
6439
6440         if (hdev->max_umv_size > 0) {
6441                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6442                                           false);
6443                 if (ret)
6444                         return ret;
6445                 hdev->max_umv_size = 0;
6446         }
6447         mutex_destroy(&hdev->umv_mutex);
6448
6449         return 0;
6450 }
6451
6452 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6453                                u16 *allocated_size, bool is_alloc)
6454 {
6455         struct hclge_umv_spc_alc_cmd *req;
6456         struct hclge_desc desc;
6457         int ret;
6458
6459         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6460         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6461         if (!is_alloc)
6462                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6463
6464         req->space_size = cpu_to_le32(space_size);
6465
6466         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6467         if (ret) {
6468                 dev_err(&hdev->pdev->dev,
6469                         "%s umv space failed for cmd_send, ret =%d\n",
6470                         is_alloc ? "allocate" : "free", ret);
6471                 return ret;
6472         }
6473
6474         if (is_alloc && allocated_size)
6475                 *allocated_size = le32_to_cpu(desc.data[1]);
6476
6477         return 0;
6478 }
6479
6480 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6481 {
6482         struct hclge_vport *vport;
6483         int i;
6484
6485         for (i = 0; i < hdev->num_alloc_vport; i++) {
6486                 vport = &hdev->vport[i];
6487                 vport->used_umv_num = 0;
6488         }
6489
6490         mutex_lock(&hdev->umv_mutex);
6491         hdev->share_umv_size = hdev->priv_umv_size +
6492                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6493         mutex_unlock(&hdev->umv_mutex);
6494 }
6495
6496 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6497 {
6498         struct hclge_dev *hdev = vport->back;
6499         bool is_full;
6500
6501         mutex_lock(&hdev->umv_mutex);
6502         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6503                    hdev->share_umv_size == 0);
6504         mutex_unlock(&hdev->umv_mutex);
6505
6506         return is_full;
6507 }
6508
6509 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6510 {
6511         struct hclge_dev *hdev = vport->back;
6512
6513         mutex_lock(&hdev->umv_mutex);
6514         if (is_free) {
6515                 if (vport->used_umv_num > hdev->priv_umv_size)
6516                         hdev->share_umv_size++;
6517
6518                 if (vport->used_umv_num > 0)
6519                         vport->used_umv_num--;
6520         } else {
6521                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6522                     hdev->share_umv_size > 0)
6523                         hdev->share_umv_size--;
6524                 vport->used_umv_num++;
6525         }
6526         mutex_unlock(&hdev->umv_mutex);
6527 }
6528
6529 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6530                              const unsigned char *addr)
6531 {
6532         struct hclge_vport *vport = hclge_get_vport(handle);
6533
6534         return hclge_add_uc_addr_common(vport, addr);
6535 }
6536
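/* Add a unicast MAC address for a vport: validate the address, look it up
 * in the mac_vlan table and only add a new entry when it does not exist
 * and there is still UMV space left.
 */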
6537 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6538                              const unsigned char *addr)
6539 {
6540         struct hclge_dev *hdev = vport->back;
6541         struct hclge_mac_vlan_tbl_entry_cmd req;
6542         struct hclge_desc desc;
6543         u16 egress_port = 0;
6544         int ret;
6545
6546         /* mac addr check */
6547         if (is_zero_ether_addr(addr) ||
6548             is_broadcast_ether_addr(addr) ||
6549             is_multicast_ether_addr(addr)) {
6550                 dev_err(&hdev->pdev->dev,
6551                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6552                          addr,
6553                          is_zero_ether_addr(addr),
6554                          is_broadcast_ether_addr(addr),
6555                          is_multicast_ether_addr(addr));
6556                 return -EINVAL;
6557         }
6558
6559         memset(&req, 0, sizeof(req));
6560
6561         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6562                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6563
6564         req.egress_port = cpu_to_le16(egress_port);
6565
6566         hclge_prepare_mac_addr(&req, addr, false);
6567
6568         /* Look up the mac address in the mac_vlan table, and add
6569          * it if the entry does not exist. Duplicate unicast entries
6570          * are not allowed in the mac vlan table.
6571          */
6572         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6573         if (ret == -ENOENT) {
6574                 if (!hclge_is_umv_space_full(vport)) {
6575                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6576                         if (!ret)
6577                                 hclge_update_umv_space(vport, false);
6578                         return ret;
6579                 }
6580
6581                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6582                         hdev->priv_umv_size);
6583
6584                 return -ENOSPC;
6585         }
6586
6587         /* check if we just hit a duplicate entry */
6588         if (!ret) {
6589                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6590                          vport->vport_id, addr);
6591                 return 0;
6592         }
6593
6594         dev_err(&hdev->pdev->dev,
6595                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6596                 addr);
6597
6598         return ret;
6599 }
6600
6601 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6602                             const unsigned char *addr)
6603 {
6604         struct hclge_vport *vport = hclge_get_vport(handle);
6605
6606         return hclge_rm_uc_addr_common(vport, addr);
6607 }
6608
6609 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6610                             const unsigned char *addr)
6611 {
6612         struct hclge_dev *hdev = vport->back;
6613         struct hclge_mac_vlan_tbl_entry_cmd req;
6614         int ret;
6615
6616         /* mac addr check */
6617         if (is_zero_ether_addr(addr) ||
6618             is_broadcast_ether_addr(addr) ||
6619             is_multicast_ether_addr(addr)) {
6620                 dev_dbg(&hdev->pdev->dev,
6621                         "Remove mac err! invalid mac:%pM.\n",
6622                          addr);
6623                 return -EINVAL;
6624         }
6625
6626         memset(&req, 0, sizeof(req));
6627         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6628         hclge_prepare_mac_addr(&req, addr, false);
6629         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6630         if (!ret)
6631                 hclge_update_umv_space(vport, true);
6632
6633         return ret;
6634 }
6635
6636 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6637                              const unsigned char *addr)
6638 {
6639         struct hclge_vport *vport = hclge_get_vport(handle);
6640
6641         return hclge_add_mc_addr_common(vport, addr);
6642 }
6643
6644 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6645                              const unsigned char *addr)
6646 {
6647         struct hclge_dev *hdev = vport->back;
6648         struct hclge_mac_vlan_tbl_entry_cmd req;
6649         struct hclge_desc desc[3];
6650         int status;
6651
6652         /* mac addr check */
6653         if (!is_multicast_ether_addr(addr)) {
6654                 dev_err(&hdev->pdev->dev,
6655                         "Add mc mac err! invalid mac:%pM.\n",
6656                          addr);
6657                 return -EINVAL;
6658         }
6659         memset(&req, 0, sizeof(req));
6660         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6661         hclge_prepare_mac_addr(&req, addr, true);
6662         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6663         if (status) {
6664                 /* This mac addr does not exist, add a new entry for it */
6665                 memset(desc[0].data, 0, sizeof(desc[0].data));
6666                 memset(desc[1].data, 0, sizeof(desc[0].data));
6667                 memset(desc[2].data, 0, sizeof(desc[0].data));
6668         }
6669         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6670         if (status)
6671                 return status;
6672         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6673
6674         if (status == -ENOSPC)
6675                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6676
6677         return status;
6678 }
6679
6680 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6681                             const unsigned char *addr)
6682 {
6683         struct hclge_vport *vport = hclge_get_vport(handle);
6684
6685         return hclge_rm_mc_addr_common(vport, addr);
6686 }
6687
6688 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6689                             const unsigned char *addr)
6690 {
6691         struct hclge_dev *hdev = vport->back;
6692         struct hclge_mac_vlan_tbl_entry_cmd req;
6693         enum hclge_cmd_status status;
6694         struct hclge_desc desc[3];
6695
6696         /* mac addr check */
6697         if (!is_multicast_ether_addr(addr)) {
6698                 dev_dbg(&hdev->pdev->dev,
6699                         "Remove mc mac err! invalid mac:%pM.\n",
6700                          addr);
6701                 return -EINVAL;
6702         }
6703
6704         memset(&req, 0, sizeof(req));
6705         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6706         hclge_prepare_mac_addr(&req, addr, true);
6707         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6708         if (!status) {
6709                 /* This mac addr exists, remove this handle's VFID from it */
6710                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6711                 if (status)
6712                         return status;
6713
6714                 if (hclge_is_all_function_id_zero(desc))
6715                         /* All the vfids are zero, so this entry needs to be deleted */
6716                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6717                 else
6718                         /* Not all the vfids are zero, update the vfid */
6719                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6720
6721         } else {
6722                 /* Maybe this mac address is in the mta table, but it cannot be
6723                  * deleted here because an mta entry represents an address
6724                  * range rather than a specific address. The delete action for
6725                  * all entries will take effect in update_mta_status, called by
6726                  * hns3_nic_set_rx_mode.
6727                  */
6728                 status = 0;
6729         }
6730
6731         return status;
6732 }
6733
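/* Record a MAC address configured by a VF in the vport's software list
 * (unicast or multicast) so it can be restored or removed later.
 */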
6734 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6735                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6736 {
6737         struct hclge_vport_mac_addr_cfg *mac_cfg;
6738         struct list_head *list;
6739
6740         if (!vport->vport_id)
6741                 return;
6742
6743         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6744         if (!mac_cfg)
6745                 return;
6746
6747         mac_cfg->hd_tbl_status = true;
6748         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6749
6750         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6751                &vport->uc_mac_list : &vport->mc_mac_list;
6752
6753         list_add_tail(&mac_cfg->node, list);
6754 }
6755
6756 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6757                               bool is_write_tbl,
6758                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6759 {
6760         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6761         struct list_head *list;
6762         bool uc_flag, mc_flag;
6763
6764         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6765                &vport->uc_mac_list : &vport->mc_mac_list;
6766
6767         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6768         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6769
6770         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6771                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6772                         if (uc_flag && mac_cfg->hd_tbl_status)
6773                                 hclge_rm_uc_addr_common(vport, mac_addr);
6774
6775                         if (mc_flag && mac_cfg->hd_tbl_status)
6776                                 hclge_rm_mc_addr_common(vport, mac_addr);
6777
6778                         list_del(&mac_cfg->node);
6779                         kfree(mac_cfg);
6780                         break;
6781                 }
6782         }
6783 }
6784
6785 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6786                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6787 {
6788         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6789         struct list_head *list;
6790
6791         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6792                &vport->uc_mac_list : &vport->mc_mac_list;
6793
6794         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6795                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6796                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6797
6798                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6799                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6800
6801                 mac_cfg->hd_tbl_status = false;
6802                 if (is_del_list) {
6803                         list_del(&mac_cfg->node);
6804                         kfree(mac_cfg);
6805                 }
6806         }
6807 }
6808
6809 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6810 {
6811         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6812         struct hclge_vport *vport;
6813         int i;
6814
6815         mutex_lock(&hdev->vport_cfg_mutex);
6816         for (i = 0; i < hdev->num_alloc_vport; i++) {
6817                 vport = &hdev->vport[i];
6818                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6819                         list_del(&mac->node);
6820                         kfree(mac);
6821                 }
6822
6823                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6824                         list_del(&mac->node);
6825                         kfree(mac);
6826                 }
6827         }
6828         mutex_unlock(&hdev->vport_cfg_mutex);
6829 }
6830
6831 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6832                                               u16 cmdq_resp, u8 resp_code)
6833 {
6834 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6835 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6836 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6837 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6838
6839         int return_status;
6840
6841         if (cmdq_resp) {
6842                 dev_err(&hdev->pdev->dev,
6843                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6844                         cmdq_resp);
6845                 return -EIO;
6846         }
6847
6848         switch (resp_code) {
6849         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6850         case HCLGE_ETHERTYPE_ALREADY_ADD:
6851                 return_status = 0;
6852                 break;
6853         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6854                 dev_err(&hdev->pdev->dev,
6855                         "add mac ethertype failed for manager table overflow.\n");
6856                 return_status = -EIO;
6857                 break;
6858         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6859                 dev_err(&hdev->pdev->dev,
6860                         "add mac ethertype failed for key conflict.\n");
6861                 return_status = -EIO;
6862                 break;
6863         default:
6864                 dev_err(&hdev->pdev->dev,
6865                         "add mac ethertype failed for undefined, code=%d.\n",
6866                         resp_code);
6867                 return_status = -EIO;
6868         }
6869
6870         return return_status;
6871 }
6872
6873 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6874                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6875 {
6876         struct hclge_desc desc;
6877         u8 resp_code;
6878         u16 retval;
6879         int ret;
6880
6881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6882         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6883
6884         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6885         if (ret) {
6886                 dev_err(&hdev->pdev->dev,
6887                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6888                         ret);
6889                 return ret;
6890         }
6891
6892         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6893         retval = le16_to_cpu(desc.retval);
6894
6895         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6896 }
6897
6898 static int init_mgr_tbl(struct hclge_dev *hdev)
6899 {
6900         int ret;
6901         int i;
6902
6903         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6904                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6905                 if (ret) {
6906                         dev_err(&hdev->pdev->dev,
6907                                 "add mac ethertype failed, ret =%d.\n",
6908                                 ret);
6909                         return ret;
6910                 }
6911         }
6912
6913         return 0;
6914 }
6915
6916 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6917 {
6918         struct hclge_vport *vport = hclge_get_vport(handle);
6919         struct hclge_dev *hdev = vport->back;
6920
6921         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6922 }
6923
6924 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6925                               bool is_first)
6926 {
6927         const unsigned char *new_addr = (const unsigned char *)p;
6928         struct hclge_vport *vport = hclge_get_vport(handle);
6929         struct hclge_dev *hdev = vport->back;
6930         int ret;
6931
6932         /* mac addr check */
6933         if (is_zero_ether_addr(new_addr) ||
6934             is_broadcast_ether_addr(new_addr) ||
6935             is_multicast_ether_addr(new_addr)) {
6936                 dev_err(&hdev->pdev->dev,
6937                         "Change uc mac err! invalid mac:%pM.\n",
6938                          new_addr);
6939                 return -EINVAL;
6940         }
6941
6942         if ((!is_first || is_kdump_kernel()) &&
6943             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6944                 dev_warn(&hdev->pdev->dev,
6945                          "remove old uc mac address fail.\n");
6946
6947         ret = hclge_add_uc_addr(handle, new_addr);
6948         if (ret) {
6949                 dev_err(&hdev->pdev->dev,
6950                         "add uc mac address fail, ret =%d.\n",
6951                         ret);
6952
6953                 if (!is_first &&
6954                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6955                         dev_err(&hdev->pdev->dev,
6956                                 "restore uc mac address fail.\n");
6957
6958                 return -EIO;
6959         }
6960
6961         ret = hclge_pause_addr_cfg(hdev, new_addr);
6962         if (ret) {
6963                 dev_err(&hdev->pdev->dev,
6964                         "configure mac pause address fail, ret =%d.\n",
6965                         ret);
6966                 return -EIO;
6967         }
6968
6969         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6970
6971         return 0;
6972 }
6973
6974 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6975                           int cmd)
6976 {
6977         struct hclge_vport *vport = hclge_get_vport(handle);
6978         struct hclge_dev *hdev = vport->back;
6979
6980         if (!hdev->hw.mac.phydev)
6981                 return -EOPNOTSUPP;
6982
6983         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6984 }
6985
6986 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6987                                       u8 fe_type, bool filter_en, u8 vf_id)
6988 {
6989         struct hclge_vlan_filter_ctrl_cmd *req;
6990         struct hclge_desc desc;
6991         int ret;
6992
6993         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6994
6995         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6996         req->vlan_type = vlan_type;
6997         req->vlan_fe = filter_en ? fe_type : 0;
6998         req->vf_id = vf_id;
6999
7000         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7001         if (ret)
7002                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7003                         ret);
7004
7005         return ret;
7006 }
7007
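/* VLAN filter enable layout: revision 0x20 exposes a single egress enable bit
 * (HCLGE_FILTER_FE_EGRESS_V1_B), while newer revisions provide separate
 * NIC/RoCE ingress and egress enable bits that are combined below.
 */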
7008 #define HCLGE_FILTER_TYPE_VF            0
7009 #define HCLGE_FILTER_TYPE_PORT          1
7010 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7011 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7012 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7013 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7014 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7015 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7016                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7017 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7018                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7019
7020 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7021 {
7022         struct hclge_vport *vport = hclge_get_vport(handle);
7023         struct hclge_dev *hdev = vport->back;
7024
7025         if (hdev->pdev->revision >= 0x21) {
7026                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7027                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7028                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7029                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7030         } else {
7031                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7032                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7033                                            0);
7034         }
7035         if (enable)
7036                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7037         else
7038                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7039 }
7040
7041 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7042                                     bool is_kill, u16 vlan, u8 qos,
7043                                     __be16 proto)
7044 {
7045 #define HCLGE_MAX_VF_BYTES  16
7046         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7047         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7048         struct hclge_desc desc[2];
7049         u8 vf_byte_val;
7050         u8 vf_byte_off;
7051         int ret;
7052
7053         /* if the vf vlan table is full, the firmware closes the vf vlan
7054          * filter, so it is neither possible nor necessary to add a new vlan id
7055          */
7056         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7057                 return 0;
7058
7059         hclge_cmd_setup_basic_desc(&desc[0],
7060                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7061         hclge_cmd_setup_basic_desc(&desc[1],
7062                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7063
7064         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7065
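        /* select the byte and bit within the 16-byte-per-descriptor VF bitmap,
         * e.g. vfid 10 -> byte 1, bit 2 (0x04); vfid 128 and above spill over
         * into the second descriptor
         */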
7066         vf_byte_off = vfid / 8;
7067         vf_byte_val = 1 << (vfid % 8);
7068
7069         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7070         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7071
7072         req0->vlan_id  = cpu_to_le16(vlan);
7073         req0->vlan_cfg = is_kill;
7074
7075         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7076                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7077         else
7078                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7079
7080         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7081         if (ret) {
7082                 dev_err(&hdev->pdev->dev,
7083                         "Send vf vlan command fail, ret =%d.\n",
7084                         ret);
7085                 return ret;
7086         }
7087
7088         if (!is_kill) {
7089 #define HCLGE_VF_VLAN_NO_ENTRY  2
7090                 if (!req0->resp_code || req0->resp_code == 1)
7091                         return 0;
7092
7093                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7094                         set_bit(vfid, hdev->vf_vlan_full);
7095                         dev_warn(&hdev->pdev->dev,
7096                                  "vf vlan table is full, vf vlan filter is disabled\n");
7097                         return 0;
7098                 }
7099
7100                 dev_err(&hdev->pdev->dev,
7101                         "Add vf vlan filter fail, ret =%d.\n",
7102                         req0->resp_code);
7103         } else {
7104 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7105                 if (!req0->resp_code)
7106                         return 0;
7107
7108                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7109                         dev_warn(&hdev->pdev->dev,
7110                                  "vlan %d filter is not in vf vlan table\n",
7111                                  vlan);
7112                         return 0;
7113                 }
7114
7115                 dev_err(&hdev->pdev->dev,
7116                         "Kill vf vlan filter fail, ret =%d.\n",
7117                         req0->resp_code);
7118         }
7119
7120         return -EIO;
7121 }
7122
7123 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7124                                       u16 vlan_id, bool is_kill)
7125 {
7126         struct hclge_vlan_filter_pf_cfg_cmd *req;
7127         struct hclge_desc desc;
7128         u8 vlan_offset_byte_val;
7129         u8 vlan_offset_byte;
7130         u8 vlan_offset_160;
7131         int ret;
7132
7133         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7134
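        /* each PF VLAN filter command covers a window of 160 VLAN ids, e.g.
         * vlan_id 1000 -> window 6 (960..1119), bitmap byte 5, bit value 0x01
         */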
7135         vlan_offset_160 = vlan_id / 160;
7136         vlan_offset_byte = (vlan_id % 160) / 8;
7137         vlan_offset_byte_val = 1 << (vlan_id % 8);
7138
7139         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7140         req->vlan_offset = vlan_offset_160;
7141         req->vlan_cfg = is_kill;
7142         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7143
7144         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7145         if (ret)
7146                 dev_err(&hdev->pdev->dev,
7147                         "port vlan command, send fail, ret =%d.\n", ret);
7148         return ret;
7149 }
7150
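/* Program a VLAN filter entry for one vport: the per-VF filter is always
 * updated, while the shared port filter is only touched when the first vport
 * joins the VLAN (add) or the last vport leaves it (kill). Killing vlan 0 is
 * a no-op.
 */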
7151 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7152                                     u16 vport_id, u16 vlan_id, u8 qos,
7153                                     bool is_kill)
7154 {
7155         u16 vport_idx, vport_num = 0;
7156         int ret;
7157
7158         if (is_kill && !vlan_id)
7159                 return 0;
7160
7161         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7162                                        0, proto);
7163         if (ret) {
7164                 dev_err(&hdev->pdev->dev,
7165                         "Set %d vport vlan filter config fail, ret =%d.\n",
7166                         vport_id, ret);
7167                 return ret;
7168         }
7169
7170         /* vlan 0 may be added twice when 8021q module is enabled */
7171         if (!is_kill && !vlan_id &&
7172             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7173                 return 0;
7174
7175         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7176                 dev_err(&hdev->pdev->dev,
7177                         "Add port vlan failed, vport %d is already in vlan %d\n",
7178                         vport_id, vlan_id);
7179                 return -EINVAL;
7180         }
7181
7182         if (is_kill &&
7183             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7184                 dev_err(&hdev->pdev->dev,
7185                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7186                         vport_id, vlan_id);
7187                 return -EINVAL;
7188         }
7189
7190         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7191                 vport_num++;
7192
7193         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7194                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7195                                                  is_kill);
7196
7197         return ret;
7198 }
7199
7200 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7201 {
7202         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7203         struct hclge_vport_vtag_tx_cfg_cmd *req;
7204         struct hclge_dev *hdev = vport->back;
7205         struct hclge_desc desc;
7206         int status;
7207
7208         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7209
7210         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7211         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7212         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7213         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7214                       vcfg->accept_tag1 ? 1 : 0);
7215         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7216                       vcfg->accept_untag1 ? 1 : 0);
7217         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7218                       vcfg->accept_tag2 ? 1 : 0);
7219         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7220                       vcfg->accept_untag2 ? 1 : 0);
7221         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7222                       vcfg->insert_tag1_en ? 1 : 0);
7223         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7224                       vcfg->insert_tag2_en ? 1 : 0);
7225         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7226
7227         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7228         req->vf_bitmap[req->vf_offset] =
7229                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7230
7231         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7232         if (status)
7233                 dev_err(&hdev->pdev->dev,
7234                         "Send port txvlan cfg command fail, ret =%d\n",
7235                         status);
7236
7237         return status;
7238 }
7239
7240 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7241 {
7242         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7243         struct hclge_vport_vtag_rx_cfg_cmd *req;
7244         struct hclge_dev *hdev = vport->back;
7245         struct hclge_desc desc;
7246         int status;
7247
7248         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7249
7250         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7251         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7252                       vcfg->strip_tag1_en ? 1 : 0);
7253         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7254                       vcfg->strip_tag2_en ? 1 : 0);
7255         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7256                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7257         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7258                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7259
7260         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7261         req->vf_bitmap[req->vf_offset] =
7262                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7263
7264         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7265         if (status)
7266                 dev_err(&hdev->pdev->dev,
7267                         "Send port rxvlan cfg command fail, ret =%d\n",
7268                         status);
7269
7270         return status;
7271 }
7272
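/* Configure TX/RX VLAN tag offload for a vport. With port based VLAN disabled,
 * no default tag is inserted on TX and RX tag stripping simply follows
 * rx_vlan_offload_en; with it enabled, the port VLAN tag is inserted on TX
 * and the corresponding tag is always stripped on RX.
 */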
7273 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7274                                   u16 port_base_vlan_state,
7275                                   u16 vlan_tag)
7276 {
7277         int ret;
7278
7279         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7280                 vport->txvlan_cfg.accept_tag1 = true;
7281                 vport->txvlan_cfg.insert_tag1_en = false;
7282                 vport->txvlan_cfg.default_tag1 = 0;
7283         } else {
7284                 vport->txvlan_cfg.accept_tag1 = false;
7285                 vport->txvlan_cfg.insert_tag1_en = true;
7286                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7287         }
7288
7289         vport->txvlan_cfg.accept_untag1 = true;
7290
7291         /* accept_tag2 and accept_untag2 are not supported on
7292          * pdev revision(0x20); newer revisions support them, but
7293          * these two fields cannot be configured by the user.
7294          */
7295         vport->txvlan_cfg.accept_tag2 = true;
7296         vport->txvlan_cfg.accept_untag2 = true;
7297         vport->txvlan_cfg.insert_tag2_en = false;
7298         vport->txvlan_cfg.default_tag2 = 0;
7299
7300         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7301                 vport->rxvlan_cfg.strip_tag1_en = false;
7302                 vport->rxvlan_cfg.strip_tag2_en =
7303                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7304         } else {
7305                 vport->rxvlan_cfg.strip_tag1_en =
7306                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7307                 vport->rxvlan_cfg.strip_tag2_en = true;
7308         }
7309         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7310         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7311
7312         ret = hclge_set_vlan_tx_offload_cfg(vport);
7313         if (ret)
7314                 return ret;
7315
7316         return hclge_set_vlan_rx_offload_cfg(vport);
7317 }
7318
7319 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7320 {
7321         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7322         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7323         struct hclge_desc desc;
7324         int status;
7325
7326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7327         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7328         rx_req->ot_fst_vlan_type =
7329                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7330         rx_req->ot_sec_vlan_type =
7331                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7332         rx_req->in_fst_vlan_type =
7333                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7334         rx_req->in_sec_vlan_type =
7335                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7336
7337         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7338         if (status) {
7339                 dev_err(&hdev->pdev->dev,
7340                         "Send rxvlan protocol type command fail, ret =%d\n",
7341                         status);
7342                 return status;
7343         }
7344
7345         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7346
7347         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7348         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7349         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7350
7351         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7352         if (status)
7353                 dev_err(&hdev->pdev->dev,
7354                         "Send txvlan protocol type command fail, ret =%d\n",
7355                         status);
7356
7357         return status;
7358 }
7359
7360 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7361 {
7362 #define HCLGE_DEF_VLAN_TYPE             0x8100
7363
7364         struct hnae3_handle *handle = &hdev->vport[0].nic;
7365         struct hclge_vport *vport;
7366         int ret;
7367         int i;
7368
7369         if (hdev->pdev->revision >= 0x21) {
7370                 /* for revision 0x21, vf vlan filter is per function */
7371                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7372                         vport = &hdev->vport[i];
7373                         ret = hclge_set_vlan_filter_ctrl(hdev,
7374                                                          HCLGE_FILTER_TYPE_VF,
7375                                                          HCLGE_FILTER_FE_EGRESS,
7376                                                          true,
7377                                                          vport->vport_id);
7378                         if (ret)
7379                                 return ret;
7380                 }
7381
7382                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7383                                                  HCLGE_FILTER_FE_INGRESS, true,
7384                                                  0);
7385                 if (ret)
7386                         return ret;
7387         } else {
7388                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7389                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7390                                                  true, 0);
7391                 if (ret)
7392                         return ret;
7393         }
7394
7395         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7396
7397         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7399         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7400         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7401         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7402         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7403
7404         ret = hclge_set_vlan_protocol_type(hdev);
7405         if (ret)
7406                 return ret;
7407
7408         for (i = 0; i < hdev->num_alloc_vport; i++) {
7409                 u16 vlan_tag;
7410
7411                 vport = &hdev->vport[i];
7412                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7413
7414                 ret = hclge_vlan_offload_cfg(vport,
7415                                              vport->port_base_vlan_cfg.state,
7416                                              vlan_tag);
7417                 if (ret)
7418                         return ret;
7419         }
7420
7421         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7422 }
7423
7424 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7425                                        bool writen_to_tbl)
7426 {
7427         struct hclge_vport_vlan_cfg *vlan;
7428
7429         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7430         if (!vlan)
7431                 return;
7432
7433         vlan->hd_tbl_status = writen_to_tbl;
7434         vlan->vlan_id = vlan_id;
7435
7436         list_add_tail(&vlan->node, &vport->vlan_list);
7437 }
7438
7439 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7440 {
7441         struct hclge_vport_vlan_cfg *vlan, *tmp;
7442         struct hclge_dev *hdev = vport->back;
7443         int ret;
7444
7445         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7446                 if (!vlan->hd_tbl_status) {
7447                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7448                                                        vport->vport_id,
7449                                                        vlan->vlan_id, 0, false);
7450                         if (ret) {
7451                                 dev_err(&hdev->pdev->dev,
7452                                         "restore vport vlan list failed, ret=%d\n",
7453                                         ret);
7454                                 return ret;
7455                         }
7456                 }
7457                 vlan->hd_tbl_status = true;
7458         }
7459
7460         return 0;
7461 }
7462
7463 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7464                                       bool is_write_tbl)
7465 {
7466         struct hclge_vport_vlan_cfg *vlan, *tmp;
7467         struct hclge_dev *hdev = vport->back;
7468
7469         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7470                 if (vlan->vlan_id == vlan_id) {
7471                         if (is_write_tbl && vlan->hd_tbl_status)
7472                                 hclge_set_vlan_filter_hw(hdev,
7473                                                          htons(ETH_P_8021Q),
7474                                                          vport->vport_id,
7475                                                          vlan_id, 0,
7476                                                          true);
7477
7478                         list_del(&vlan->node);
7479                         kfree(vlan);
7480                         break;
7481                 }
7482         }
7483 }
7484
7485 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7486 {
7487         struct hclge_vport_vlan_cfg *vlan, *tmp;
7488         struct hclge_dev *hdev = vport->back;
7489
7490         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491                 if (vlan->hd_tbl_status)
7492                         hclge_set_vlan_filter_hw(hdev,
7493                                                  htons(ETH_P_8021Q),
7494                                                  vport->vport_id,
7495                                                  vlan->vlan_id, 0,
7496                                                  true);
7497
7498                 vlan->hd_tbl_status = false;
7499                 if (is_del_list) {
7500                         list_del(&vlan->node);
7501                         kfree(vlan);
7502                 }
7503         }
7504 }
7505
7506 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7507 {
7508         struct hclge_vport_vlan_cfg *vlan, *tmp;
7509         struct hclge_vport *vport;
7510         int i;
7511
7512         mutex_lock(&hdev->vport_cfg_mutex);
7513         for (i = 0; i < hdev->num_alloc_vport; i++) {
7514                 vport = &hdev->vport[i];
7515                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7516                         list_del(&vlan->node);
7517                         kfree(vlan);
7518                 }
7519         }
7520         mutex_unlock(&hdev->vport_cfg_mutex);
7521 }
7522
7523 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7524 {
7525         struct hclge_vport *vport = hclge_get_vport(handle);
7526         struct hclge_vport_vlan_cfg *vlan, *tmp;
7527         struct hclge_dev *hdev = vport->back;
7528         u16 vlan_proto, qos;
7529         u16 state, vlan_id;
7530         int i;
7531
7532         mutex_lock(&hdev->vport_cfg_mutex);
7533         for (i = 0; i < hdev->num_alloc_vport; i++) {
7534                 vport = &hdev->vport[i];
7535                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7536                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7537                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7538                 state = vport->port_base_vlan_cfg.state;
7539
7540                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7541                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7542                                                  vport->vport_id, vlan_id, qos,
7543                                                  false);
7544                         continue;
7545                 }
7546
7547                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7548                         if (vlan->hd_tbl_status)
7549                                 hclge_set_vlan_filter_hw(hdev,
7550                                                          htons(ETH_P_8021Q),
7551                                                          vport->vport_id,
7552                                                          vlan->vlan_id, 0,
7553                                                          false);
7554                 }
7555         }
7556
7557         mutex_unlock(&hdev->vport_cfg_mutex);
7558 }
7559
7560 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7561 {
7562         struct hclge_vport *vport = hclge_get_vport(handle);
7563
7564         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7565                 vport->rxvlan_cfg.strip_tag1_en = false;
7566                 vport->rxvlan_cfg.strip_tag2_en = enable;
7567         } else {
7568                 vport->rxvlan_cfg.strip_tag1_en = enable;
7569                 vport->rxvlan_cfg.strip_tag2_en = true;
7570         }
7571         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7572         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7573         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7574
7575         return hclge_set_vlan_rx_offload_cfg(vport);
7576 }
7577
7578 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7579                                             u16 port_base_vlan_state,
7580                                             struct hclge_vlan_info *new_info,
7581                                             struct hclge_vlan_info *old_info)
7582 {
7583         struct hclge_dev *hdev = vport->back;
7584         int ret;
7585
7586         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7587                 hclge_rm_vport_all_vlan_table(vport, false);
7588                 return hclge_set_vlan_filter_hw(hdev,
7589                                                  htons(new_info->vlan_proto),
7590                                                  vport->vport_id,
7591                                                  new_info->vlan_tag,
7592                                                  new_info->qos, false);
7593         }
7594
7595         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7596                                        vport->vport_id, old_info->vlan_tag,
7597                                        old_info->qos, true);
7598         if (ret)
7599                 return ret;
7600
7601         return hclge_add_vport_all_vlan_table(vport);
7602 }
7603
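/* Apply a new port based VLAN configuration to a vport: reprogram the TX/RX
 * tag offload first, then either swap the old hardware filter entry for the
 * new one (MODIFY) or move the entries between the port based VLAN and the
 * vport VLAN list (enable/disable), updating the cached state accordingly.
 */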
7604 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7605                                     struct hclge_vlan_info *vlan_info)
7606 {
7607         struct hnae3_handle *nic = &vport->nic;
7608         struct hclge_vlan_info *old_vlan_info;
7609         struct hclge_dev *hdev = vport->back;
7610         int ret;
7611
7612         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7613
7614         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7615         if (ret)
7616                 return ret;
7617
7618         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7619                 /* add new VLAN tag */
7620                 ret = hclge_set_vlan_filter_hw(hdev,
7621                                                htons(vlan_info->vlan_proto),
7622                                                vport->vport_id,
7623                                                vlan_info->vlan_tag,
7624                                                vlan_info->qos, false);
7625                 if (ret)
7626                         return ret;
7627
7628                 /* remove old VLAN tag */
7629                 ret = hclge_set_vlan_filter_hw(hdev,
7630                                                htons(old_vlan_info->vlan_proto),
7631                                                vport->vport_id,
7632                                                old_vlan_info->vlan_tag,
7633                                                old_vlan_info->qos, true);
7634                 if (ret)
7635                         return ret;
7636
7637                 goto update;
7638         }
7639
7640         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7641                                                old_vlan_info);
7642         if (ret)
7643                 return ret;
7644
7645         /* update state only when disabling/enabling port based VLAN */
7646         vport->port_base_vlan_cfg.state = state;
7647         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7648                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7649         else
7650                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7651
7652 update:
7653         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7654         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7655         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7656
7657         return 0;
7658 }
7659
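/* Decide how a requested VF VLAN maps onto the port based VLAN state:
 *   currently disabled + vlan 0        -> NOCHANGE
 *   currently disabled + non-zero vlan -> ENABLE
 *   currently enabled  + vlan 0        -> DISABLE
 *   currently enabled  + same vlan     -> NOCHANGE
 *   currently enabled  + other vlan    -> MODIFY
 */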
7660 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7661                                           enum hnae3_port_base_vlan_state state,
7662                                           u16 vlan)
7663 {
7664         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7665                 if (!vlan)
7666                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7667                 else
7668                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7669         } else {
7670                 if (!vlan)
7671                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7672                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7673                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7674                 else
7675                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7676         }
7677 }
7678
7679 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7680                                     u16 vlan, u8 qos, __be16 proto)
7681 {
7682         struct hclge_vport *vport = hclge_get_vport(handle);
7683         struct hclge_dev *hdev = vport->back;
7684         struct hclge_vlan_info vlan_info;
7685         u16 state;
7686         int ret;
7687
7688         if (hdev->pdev->revision == 0x20)
7689                 return -EOPNOTSUPP;
7690
7691         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7692         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7693                 return -EINVAL;
7694         if (proto != htons(ETH_P_8021Q))
7695                 return -EPROTONOSUPPORT;
7696
7697         vport = &hdev->vport[vfid];
7698         state = hclge_get_port_base_vlan_state(vport,
7699                                                vport->port_base_vlan_cfg.state,
7700                                                vlan);
7701         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7702                 return 0;
7703
7704         vlan_info.vlan_tag = vlan;
7705         vlan_info.qos = qos;
7706         vlan_info.vlan_proto = ntohs(proto);
7707
7708         /* update port based VLAN for PF */
7709         if (!vfid) {
7710                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7711                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7712                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7713
7714                 return ret;
7715         }
7716
7717         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7718                 return hclge_update_port_base_vlan_cfg(vport, state,
7719                                                        &vlan_info);
7720         } else {
7721                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7722                                                         (u8)vfid, state,
7723                                                         vlan, qos,
7724                                                         ntohs(proto));
7725                 return ret;
7726         }
7727 }
7728
7729 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7730                           u16 vlan_id, bool is_kill)
7731 {
7732         struct hclge_vport *vport = hclge_get_vport(handle);
7733         struct hclge_dev *hdev = vport->back;
7734         bool writen_to_tbl = false;
7735         int ret = 0;
7736
7737         /* when port based VLAN is enabled, we use the port based VLAN as
7738          * the VLAN filter entry. In this case, we don't update the VLAN
7739          * filter table when the user adds or removes a VLAN, we just update
7740          * the vport VLAN list. The VLAN ids in the VLAN list are not written
7741          * to the VLAN filter table until port based VLAN is disabled
7742          */
7743         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7744                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7745                                                vlan_id, 0, is_kill);
7746                 writen_to_tbl = true;
7747         }
7748
7749         if (ret)
7750                 return ret;
7751
7752         if (is_kill)
7753                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7754         else
7755                 hclge_add_vport_vlan_table(vport, vlan_id,
7756                                            writen_to_tbl);
7757
7758         return 0;
7759 }
7760
7761 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7762 {
7763         struct hclge_config_max_frm_size_cmd *req;
7764         struct hclge_desc desc;
7765
7766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7767
7768         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7769         req->max_frm_size = cpu_to_le16(new_mps);
7770         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7771
7772         return hclge_cmd_send(&hdev->hw, &desc, 1);
7773 }
7774
7775 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7776 {
7777         struct hclge_vport *vport = hclge_get_vport(handle);
7778
7779         return hclge_set_vport_mtu(vport, new_mtu);
7780 }
7781
7782 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7783 {
7784         struct hclge_dev *hdev = vport->back;
7785         int i, max_frm_size, ret;
7786
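        /* frame size = MTU + L2 header + FCS + two VLAN tags,
         * e.g. MTU 1500 -> 1500 + 14 + 4 + 8 = 1526 bytes
         */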
7787         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7788         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7789             max_frm_size > HCLGE_MAC_MAX_FRAME)
7790                 return -EINVAL;
7791
7792         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7793         mutex_lock(&hdev->vport_lock);
7794         /* VF's mps must fit within hdev->mps */
7795         if (vport->vport_id && max_frm_size > hdev->mps) {
7796                 mutex_unlock(&hdev->vport_lock);
7797                 return -EINVAL;
7798         } else if (vport->vport_id) {
7799                 vport->mps = max_frm_size;
7800                 mutex_unlock(&hdev->vport_lock);
7801                 return 0;
7802         }
7803
7804         /* PF's mps must be greater than VF's mps */
7805         for (i = 1; i < hdev->num_alloc_vport; i++)
7806                 if (max_frm_size < hdev->vport[i].mps) {
7807                         mutex_unlock(&hdev->vport_lock);
7808                         return -EINVAL;
7809                 }
7810
7811         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7812
7813         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7814         if (ret) {
7815                 dev_err(&hdev->pdev->dev,
7816                         "Change mtu fail, ret =%d\n", ret);
7817                 goto out;
7818         }
7819
7820         hdev->mps = max_frm_size;
7821         vport->mps = max_frm_size;
7822
7823         ret = hclge_buffer_alloc(hdev);
7824         if (ret)
7825                 dev_err(&hdev->pdev->dev,
7826                         "Allocate buffer fail, ret =%d\n", ret);
7827
7828 out:
7829         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7830         mutex_unlock(&hdev->vport_lock);
7831         return ret;
7832 }
7833
7834 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7835                                     bool enable)
7836 {
7837         struct hclge_reset_tqp_queue_cmd *req;
7838         struct hclge_desc desc;
7839         int ret;
7840
7841         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7842
7843         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7844         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7845         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7846
7847         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7848         if (ret) {
7849                 dev_err(&hdev->pdev->dev,
7850                         "Send tqp reset cmd error, status =%d\n", ret);
7851                 return ret;
7852         }
7853
7854         return 0;
7855 }
7856
7857 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7858 {
7859         struct hclge_reset_tqp_queue_cmd *req;
7860         struct hclge_desc desc;
7861         int ret;
7862
7863         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7864
7865         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7866         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7867
7868         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7869         if (ret) {
7870                 dev_err(&hdev->pdev->dev,
7871                         "Get reset status error, status =%d\n", ret);
7872                 return ret;
7873         }
7874
7875         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7876 }
7877
7878 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7879 {
7880         struct hnae3_queue *queue;
7881         struct hclge_tqp *tqp;
7882
7883         queue = handle->kinfo.tqp[queue_id];
7884         tqp = container_of(queue, struct hclge_tqp, q);
7885
7886         return tqp->index;
7887 }
7888
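/* Reset a single TQP: disable the queue, assert the per-queue soft reset,
 * poll the reset status every 20 ms for up to HCLGE_TQP_RESET_TRY_TIMES
 * iterations, then deassert the reset.
 */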
7889 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7890 {
7891         struct hclge_vport *vport = hclge_get_vport(handle);
7892         struct hclge_dev *hdev = vport->back;
7893         int reset_try_times = 0;
7894         int reset_status;
7895         u16 queue_gid;
7896         int ret;
7897
7898         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7899
7900         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7901         if (ret) {
7902                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7903                 return ret;
7904         }
7905
7906         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7907         if (ret) {
7908                 dev_err(&hdev->pdev->dev,
7909                         "Send reset tqp cmd fail, ret = %d\n", ret);
7910                 return ret;
7911         }
7912
7913         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7914                 /* Wait for tqp hw reset */
7915                 msleep(20);
7916                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7917                 if (reset_status)
7918                         break;
7919         }
7920
7921         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7922                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7923                 return -ETIME;
7924         }
7925
7926         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7927         if (ret)
7928                 dev_err(&hdev->pdev->dev,
7929                         "Deassert the soft reset fail, ret = %d\n", ret);
7930
7931         return ret;
7932 }
7933
7934 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7935 {
7936         struct hclge_dev *hdev = vport->back;
7937         int reset_try_times = 0;
7938         int reset_status;
7939         u16 queue_gid;
7940         int ret;
7941
7942         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7943
7944         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7945         if (ret) {
7946                 dev_warn(&hdev->pdev->dev,
7947                          "Send reset tqp cmd fail, ret = %d\n", ret);
7948                 return;
7949         }
7950
7951         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7952                 /* Wait for tqp hw reset */
7953                 msleep(20);
7954                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7955                 if (reset_status)
7956                         break;
7957         }
7958
7959         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7960                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7961                 return;
7962         }
7963
7964         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7965         if (ret)
7966                 dev_warn(&hdev->pdev->dev,
7967                          "Deassert the soft reset fail, ret = %d\n", ret);
7968 }
7969
7970 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7971 {
7972         struct hclge_vport *vport = hclge_get_vport(handle);
7973         struct hclge_dev *hdev = vport->back;
7974
7975         return hdev->fw_version;
7976 }
7977
7978 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7979 {
7980         struct phy_device *phydev = hdev->hw.mac.phydev;
7981
7982         if (!phydev)
7983                 return;
7984
7985         phy_set_asym_pause(phydev, rx_en, tx_en);
7986 }
7987
7988 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7989 {
7990         int ret;
7991
7992         if (rx_en && tx_en)
7993                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7994         else if (rx_en && !tx_en)
7995                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7996         else if (!rx_en && tx_en)
7997                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7998         else
7999                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8000
8001         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8002                 return 0;
8003
8004         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8005         if (ret) {
8006                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8007                         ret);
8008                 return ret;
8009         }
8010
8011         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8012
8013         return 0;
8014 }
8015
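/* Resolve flow control from the PHY autoneg result: combine the local and
 * link-partner pause advertisements via mii_resolve_flowctrl_fdx(), force
 * pause off for half duplex, and apply the result with hclge_cfg_pauseparam().
 */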
8016 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8017 {
8018         struct phy_device *phydev = hdev->hw.mac.phydev;
8019         u16 remote_advertising = 0;
8020         u16 local_advertising;
8021         u32 rx_pause, tx_pause;
8022         u8 flowctl;
8023
8024         if (!phydev->link || !phydev->autoneg)
8025                 return 0;
8026
8027         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8028
8029         if (phydev->pause)
8030                 remote_advertising = LPA_PAUSE_CAP;
8031
8032         if (phydev->asym_pause)
8033                 remote_advertising |= LPA_PAUSE_ASYM;
8034
8035         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8036                                            remote_advertising);
8037         tx_pause = flowctl & FLOW_CTRL_TX;
8038         rx_pause = flowctl & FLOW_CTRL_RX;
8039
8040         if (phydev->duplex == HCLGE_MAC_HALF) {
8041                 tx_pause = 0;
8042                 rx_pause = 0;
8043         }
8044
8045         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8046 }
8047
8048 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8049                                  u32 *rx_en, u32 *tx_en)
8050 {
8051         struct hclge_vport *vport = hclge_get_vport(handle);
8052         struct hclge_dev *hdev = vport->back;
8053
8054         *auto_neg = hclge_get_autoneg(handle);
8055
8056         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8057                 *rx_en = 0;
8058                 *tx_en = 0;
8059                 return;
8060         }
8061
8062         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8063                 *rx_en = 1;
8064                 *tx_en = 0;
8065         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8066                 *tx_en = 1;
8067                 *rx_en = 0;
8068         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8069                 *rx_en = 1;
8070                 *tx_en = 1;
8071         } else {
8072                 *rx_en = 0;
8073                 *tx_en = 0;
8074         }
8075 }
8076
8077 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8078                                 u32 rx_en, u32 tx_en)
8079 {
8080         struct hclge_vport *vport = hclge_get_vport(handle);
8081         struct hclge_dev *hdev = vport->back;
8082         struct phy_device *phydev = hdev->hw.mac.phydev;
8083         u32 fc_autoneg;
8084
8085         fc_autoneg = hclge_get_autoneg(handle);
8086         if (auto_neg != fc_autoneg) {
8087                 dev_info(&hdev->pdev->dev,
8088                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8089                 return -EOPNOTSUPP;
8090         }
8091
8092         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8093                 dev_info(&hdev->pdev->dev,
8094                          "Priority flow control enabled. Cannot set link flow control.\n");
8095                 return -EOPNOTSUPP;
8096         }
8097
8098         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8099
8100         if (!fc_autoneg)
8101                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8102
8103         if (phydev)
8104                 return phy_start_aneg(phydev);
8105
8106         if (hdev->pdev->revision == 0x20)
8107                 return -EOPNOTSUPP;
8108
8109         return hclge_restart_autoneg(handle);
8110 }
8111
8112 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8113                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8114 {
8115         struct hclge_vport *vport = hclge_get_vport(handle);
8116         struct hclge_dev *hdev = vport->back;
8117
8118         if (speed)
8119                 *speed = hdev->hw.mac.speed;
8120         if (duplex)
8121                 *duplex = hdev->hw.mac.duplex;
8122         if (auto_neg)
8123                 *auto_neg = hdev->hw.mac.autoneg;
8124 }
8125
8126 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8127                                  u8 *module_type)
8128 {
8129         struct hclge_vport *vport = hclge_get_vport(handle);
8130         struct hclge_dev *hdev = vport->back;
8131
8132         if (media_type)
8133                 *media_type = hdev->hw.mac.media_type;
8134
8135         if (module_type)
8136                 *module_type = hdev->hw.mac.module_type;
8137 }
8138
8139 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8140                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8141 {
8142         struct hclge_vport *vport = hclge_get_vport(handle);
8143         struct hclge_dev *hdev = vport->back;
8144         struct phy_device *phydev = hdev->hw.mac.phydev;
8145         int mdix_ctrl, mdix, retval, is_resolved;
8146
8147         if (!phydev) {
8148                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8149                 *tp_mdix = ETH_TP_MDI_INVALID;
8150                 return;
8151         }
8152
8153         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8154
8155         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8156         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8157                                     HCLGE_PHY_MDIX_CTRL_S);
8158
8159         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8160         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8161         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8162
8163         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8164
8165         switch (mdix_ctrl) {
8166         case 0x0:
8167                 *tp_mdix_ctrl = ETH_TP_MDI;
8168                 break;
8169         case 0x1:
8170                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8171                 break;
8172         case 0x3:
8173                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8174                 break;
8175         default:
8176                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8177                 break;
8178         }
8179
8180         if (!is_resolved)
8181                 *tp_mdix = ETH_TP_MDI_INVALID;
8182         else if (mdix)
8183                 *tp_mdix = ETH_TP_MDI_X;
8184         else
8185                 *tp_mdix = ETH_TP_MDI;
8186 }
8187
8188 static void hclge_info_show(struct hclge_dev *hdev)
8189 {
8190         struct device *dev = &hdev->pdev->dev;
8191
8192         dev_info(dev, "PF info begin:\n");
8193
8194         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8195         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8196         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8197         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8198         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8199         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8200         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8201         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8202         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8203         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8204         dev_info(dev, "This is %s PF\n",
8205                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8206         dev_info(dev, "DCB %s\n",
8207                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8208         dev_info(dev, "MQPRIO %s\n",
8209                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8210
8211         dev_info(dev, "PF info end.\n");
8212 }
8213
8214 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8215                                           struct hclge_vport *vport)
8216 {
8217         struct hnae3_client *client = vport->nic.client;
8218         struct hclge_dev *hdev = ae_dev->priv;
8219         int ret;
8220
8221         ret = client->ops->init_instance(&vport->nic);
8222         if (ret)
8223                 return ret;
8224
8225         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8226         hnae3_set_client_init_flag(client, ae_dev, 1);
8227
8228         /* Enable nic hw error interrupts */
8229         ret = hclge_config_nic_hw_error(hdev, true);
8230         if (ret)
8231                 dev_err(&ae_dev->pdev->dev,
8232                         "fail(%d) to enable hw error interrupts\n", ret);
8233
8234         if (netif_msg_drv(&hdev->vport->nic))
8235                 hclge_info_show(hdev);
8236
8237         return ret;
8238 }
8239
8240 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8241                                            struct hclge_vport *vport)
8242 {
8243         struct hnae3_client *client = vport->roce.client;
8244         struct hclge_dev *hdev = ae_dev->priv;
8245         int ret;
8246
8247         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8248             !hdev->nic_client)
8249                 return 0;
8250
8251         client = hdev->roce_client;
8252         ret = hclge_init_roce_base_info(vport);
8253         if (ret)
8254                 return ret;
8255
8256         ret = client->ops->init_instance(&vport->roce);
8257         if (ret)
8258                 return ret;
8259
8260         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8261         hnae3_set_client_init_flag(client, ae_dev, 1);
8262
8263         return 0;
8264 }
8265
8266 static int hclge_init_client_instance(struct hnae3_client *client,
8267                                       struct hnae3_ae_dev *ae_dev)
8268 {
8269         struct hclge_dev *hdev = ae_dev->priv;
8270         struct hclge_vport *vport;
8271         int i, ret;
8272
8273         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8274                 vport = &hdev->vport[i];
8275
8276                 switch (client->type) {
8277                 case HNAE3_CLIENT_KNIC:
8278
8279                         hdev->nic_client = client;
8280                         vport->nic.client = client;
8281                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8282                         if (ret)
8283                                 goto clear_nic;
8284
8285                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8286                         if (ret)
8287                                 goto clear_roce;
8288
8289                         break;
8290                 case HNAE3_CLIENT_ROCE:
8291                         if (hnae3_dev_roce_supported(hdev)) {
8292                                 hdev->roce_client = client;
8293                                 vport->roce.client = client;
8294                         }
8295
8296                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8297                         if (ret)
8298                                 goto clear_roce;
8299
8300                         break;
8301                 default:
8302                         return -EINVAL;
8303                 }
8304         }
8305
8306         /* Enable roce ras interrupts */
8307         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8308         if (ret)
8309                 dev_err(&ae_dev->pdev->dev,
8310                         "fail(%d) to enable roce ras interrupts\n", ret);
8311
8312         return ret;
8313
8314 clear_nic:
8315         hdev->nic_client = NULL;
8316         vport->nic.client = NULL;
8317         return ret;
8318 clear_roce:
8319         hdev->roce_client = NULL;
8320         vport->roce.client = NULL;
8321         return ret;
8322 }
8323
8324 static void hclge_uninit_client_instance(struct hnae3_client *client,
8325                                          struct hnae3_ae_dev *ae_dev)
8326 {
8327         struct hclge_dev *hdev = ae_dev->priv;
8328         struct hclge_vport *vport;
8329         int i;
8330
8331         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8332                 vport = &hdev->vport[i];
8333                 if (hdev->roce_client) {
8334                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8335                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8336                                                                 0);
8337                         hdev->roce_client = NULL;
8338                         vport->roce.client = NULL;
8339                 }
8340                 if (client->type == HNAE3_CLIENT_ROCE)
8341                         return;
8342                 if (hdev->nic_client && client->ops->uninit_instance) {
8343                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8344                         client->ops->uninit_instance(&vport->nic, 0);
8345                         hdev->nic_client = NULL;
8346                         vport->nic.client = NULL;
8347                 }
8348         }
8349 }
8350
8351 static int hclge_pci_init(struct hclge_dev *hdev)
8352 {
8353         struct pci_dev *pdev = hdev->pdev;
8354         struct hclge_hw *hw;
8355         int ret;
8356
8357         ret = pci_enable_device(pdev);
8358         if (ret) {
8359                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8360                 return ret;
8361         }
8362
8363         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8364         if (ret) {
8365                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8366                 if (ret) {
8367                         dev_err(&pdev->dev,
8368                                 "can't set consistent PCI DMA\n");
8369                         goto err_disable_device;
8370                 }
8371                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8372         }
8373
8374         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8375         if (ret) {
8376                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8377                 goto err_disable_device;
8378         }
8379
8380         pci_set_master(pdev);
8381         hw = &hdev->hw;
8382         hw->io_base = pcim_iomap(pdev, 2, 0);
8383         if (!hw->io_base) {
8384                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8385                 ret = -ENOMEM;
8386                 goto err_clr_master;
8387         }
8388
8389         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8390
8391         return 0;
8392 err_clr_master:
8393         pci_clear_master(pdev);
8394         pci_release_regions(pdev);
8395 err_disable_device:
8396         pci_disable_device(pdev);
8397
8398         return ret;
8399 }
8400
8401 static void hclge_pci_uninit(struct hclge_dev *hdev)
8402 {
8403         struct pci_dev *pdev = hdev->pdev;
8404
8405         pcim_iounmap(pdev, hdev->hw.io_base);
8406         pci_free_irq_vectors(pdev);
8407         pci_clear_master(pdev);
8408         pci_release_mem_regions(pdev);
8409         pci_disable_device(pdev);
8410 }
8411
8412 static void hclge_state_init(struct hclge_dev *hdev)
8413 {
8414         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8415         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8416         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8417         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8418         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8419         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8420 }
8421
8422 static void hclge_state_uninit(struct hclge_dev *hdev)
8423 {
8424         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8425         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8426
8427         if (hdev->service_timer.function)
8428                 del_timer_sync(&hdev->service_timer);
8429         if (hdev->reset_timer.function)
8430                 del_timer_sync(&hdev->reset_timer);
8431         if (hdev->service_task.func)
8432                 cancel_work_sync(&hdev->service_task);
8433         if (hdev->rst_service_task.func)
8434                 cancel_work_sync(&hdev->rst_service_task);
8435         if (hdev->mbx_service_task.func)
8436                 cancel_work_sync(&hdev->mbx_service_task);
8437 }
8438
8439 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8440 {
8441 #define HCLGE_FLR_WAIT_MS       100
8442 #define HCLGE_FLR_WAIT_CNT      50
8443         struct hclge_dev *hdev = ae_dev->priv;
8444         int cnt = 0;
8445
8446         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8447         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8448         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8449         hclge_reset_event(hdev->pdev, NULL);
8450
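             /* wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds)
              * for the reset task to bring the function down
              */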
8451         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8452                cnt++ < HCLGE_FLR_WAIT_CNT)
8453                 msleep(HCLGE_FLR_WAIT_MS);
8454
8455         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8456                 dev_err(&hdev->pdev->dev,
8457                         "flr wait down timeout: %d\n", cnt);
8458 }
8459
8460 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8461 {
8462         struct hclge_dev *hdev = ae_dev->priv;
8463
8464         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8465 }
8466
8467 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8468 {
8469         struct pci_dev *pdev = ae_dev->pdev;
8470         struct hclge_dev *hdev;
8471         int ret;
8472
8473         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8474         if (!hdev) {
8475                 ret = -ENOMEM;
8476                 goto out;
8477         }
8478
8479         hdev->pdev = pdev;
8480         hdev->ae_dev = ae_dev;
8481         hdev->reset_type = HNAE3_NONE_RESET;
8482         hdev->reset_level = HNAE3_FUNC_RESET;
8483         ae_dev->priv = hdev;
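             /* default MPS: standard max frame plus FCS and two VLAN tags */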
8484         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8485
8486         mutex_init(&hdev->vport_lock);
8487         mutex_init(&hdev->vport_cfg_mutex);
8488         spin_lock_init(&hdev->fd_rule_lock);
8489
8490         ret = hclge_pci_init(hdev);
8491         if (ret) {
8492                 dev_err(&pdev->dev, "PCI init failed\n");
8493                 goto out;
8494         }
8495
8496         /* Firmware command queue initialization */
8497         ret = hclge_cmd_queue_init(hdev);
8498         if (ret) {
8499                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8500                 goto err_pci_uninit;
8501         }
8502
8503         /* Firmware command initialization */
8504         ret = hclge_cmd_init(hdev);
8505         if (ret)
8506                 goto err_cmd_uninit;
8507
8508         ret = hclge_get_cap(hdev);
8509         if (ret) {
8510                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8511                         ret);
8512                 goto err_cmd_uninit;
8513         }
8514
8515         ret = hclge_configure(hdev);
8516         if (ret) {
8517                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8518                 goto err_cmd_uninit;
8519         }
8520
8521         ret = hclge_init_msi(hdev);
8522         if (ret) {
8523                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8524                 goto err_cmd_uninit;
8525         }
8526
8527         ret = hclge_misc_irq_init(hdev);
8528         if (ret) {
8529                 dev_err(&pdev->dev,
8530                         "Misc IRQ(vector0) init error, ret = %d.\n",
8531                         ret);
8532                 goto err_msi_uninit;
8533         }
8534
8535         ret = hclge_alloc_tqps(hdev);
8536         if (ret) {
8537                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8538                 goto err_msi_irq_uninit;
8539         }
8540
8541         ret = hclge_alloc_vport(hdev);
8542         if (ret) {
8543                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8544                 goto err_msi_irq_uninit;
8545         }
8546
8547         ret = hclge_map_tqp(hdev);
8548         if (ret) {
8549                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8550                 goto err_msi_irq_uninit;
8551         }
8552
8553         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8554                 ret = hclge_mac_mdio_config(hdev);
8555                 if (ret) {
8556                         dev_err(&hdev->pdev->dev,
8557                                 "mdio config fail ret=%d\n", ret);
8558                         goto err_msi_irq_uninit;
8559                 }
8560         }
8561
8562         ret = hclge_init_umv_space(hdev);
8563         if (ret) {
8564                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8565                 goto err_mdiobus_unreg;
8566         }
8567
8568         ret = hclge_mac_init(hdev);
8569         if (ret) {
8570                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8571                 goto err_mdiobus_unreg;
8572         }
8573
8574         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8575         if (ret) {
8576                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8577                 goto err_mdiobus_unreg;
8578         }
8579
8580         ret = hclge_config_gro(hdev, true);
8581         if (ret)
8582                 goto err_mdiobus_unreg;
8583
8584         ret = hclge_init_vlan_config(hdev);
8585         if (ret) {
8586                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8587                 goto err_mdiobus_unreg;
8588         }
8589
8590         ret = hclge_tm_schd_init(hdev);
8591         if (ret) {
8592                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8593                 goto err_mdiobus_unreg;
8594         }
8595
8596         hclge_rss_init_cfg(hdev);
8597         ret = hclge_rss_init_hw(hdev);
8598         if (ret) {
8599                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8600                 goto err_mdiobus_unreg;
8601         }
8602
8603         ret = init_mgr_tbl(hdev);
8604         if (ret) {
8605                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8606                 goto err_mdiobus_unreg;
8607         }
8608
8609         ret = hclge_init_fd_config(hdev);
8610         if (ret) {
8611                 dev_err(&pdev->dev,
8612                         "fd table init fail, ret=%d\n", ret);
8613                 goto err_mdiobus_unreg;
8614         }
8615
8616         INIT_KFIFO(hdev->mac_tnl_log);
8617
8618         hclge_dcb_ops_set(hdev);
8619
8620         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8621         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8622         INIT_WORK(&hdev->service_task, hclge_service_task);
8623         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8624         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8625
8626         hclge_clear_all_event_cause(hdev);
8627
8628         /* Enable MISC vector(vector0) */
8629         hclge_enable_vector(&hdev->misc_vector, true);
8630
8631         hclge_state_init(hdev);
8632         hdev->last_reset_time = jiffies;
8633
8634         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8635         return 0;
8636
8637 err_mdiobus_unreg:
8638         if (hdev->hw.mac.phydev)
8639                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8640 err_msi_irq_uninit:
8641         hclge_misc_irq_uninit(hdev);
8642 err_msi_uninit:
8643         pci_free_irq_vectors(pdev);
8644 err_cmd_uninit:
8645         hclge_cmd_uninit(hdev);
8646 err_pci_uninit:
8647         pcim_iounmap(pdev, hdev->hw.io_base);
8648         pci_clear_master(pdev);
8649         pci_release_regions(pdev);
8650         pci_disable_device(pdev);
8651 out:
8652         return ret;
8653 }
8654
8655 static void hclge_stats_clear(struct hclge_dev *hdev)
8656 {
8657         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8658 }
8659
8660 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8661 {
8662         struct hclge_vport *vport = hdev->vport;
8663         int i;
8664
8665         for (i = 0; i < hdev->num_alloc_vport; i++) {
8666                 hclge_vport_stop(vport);
8667                 vport++;
8668         }
8669 }
8670
8671 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8672 {
8673         struct hclge_dev *hdev = ae_dev->priv;
8674         struct pci_dev *pdev = ae_dev->pdev;
8675         int ret;
8676
8677         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8678
8679         hclge_stats_clear(hdev);
8680         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8681         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8682
8683         ret = hclge_cmd_init(hdev);
8684         if (ret) {
8685                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8686                 return ret;
8687         }
8688
8689         ret = hclge_map_tqp(hdev);
8690         if (ret) {
8691                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8692                 return ret;
8693         }
8694
8695         hclge_reset_umv_space(hdev);
8696
8697         ret = hclge_mac_init(hdev);
8698         if (ret) {
8699                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8700                 return ret;
8701         }
8702
8703         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8704         if (ret) {
8705                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8706                 return ret;
8707         }
8708
8709         ret = hclge_config_gro(hdev, true);
8710         if (ret)
8711                 return ret;
8712
8713         ret = hclge_init_vlan_config(hdev);
8714         if (ret) {
8715                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8716                 return ret;
8717         }
8718
8719         ret = hclge_tm_init_hw(hdev, true);
8720         if (ret) {
8721                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8722                 return ret;
8723         }
8724
8725         ret = hclge_rss_init_hw(hdev);
8726         if (ret) {
8727                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8728                 return ret;
8729         }
8730
8731         ret = hclge_init_fd_config(hdev);
8732         if (ret) {
8733                 dev_err(&pdev->dev,
8734                         "fd table init fail, ret=%d\n", ret);
8735                 return ret;
8736         }
8737
8738         /* Re-enable the hw error interrupts because
8739          * the interrupts get disabled on global reset.
8740          */
8741         ret = hclge_config_nic_hw_error(hdev, true);
8742         if (ret) {
8743                 dev_err(&pdev->dev,
8744                         "fail(%d) to re-enable NIC hw error interrupts\n",
8745                         ret);
8746                 return ret;
8747         }
8748
8749         if (hdev->roce_client) {
8750                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8751                 if (ret) {
8752                         dev_err(&pdev->dev,
8753                                 "fail(%d) to re-enable roce ras interrupts\n",
8754                                 ret);
8755                         return ret;
8756                 }
8757         }
8758
8759         hclge_reset_vport_state(hdev);
8760
8761         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8762                  HCLGE_DRIVER_NAME);
8763
8764         return 0;
8765 }
8766
8767 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8768 {
8769         struct hclge_dev *hdev = ae_dev->priv;
8770         struct hclge_mac *mac = &hdev->hw.mac;
8771
8772         hclge_state_uninit(hdev);
8773
8774         if (mac->phydev)
8775                 mdiobus_unregister(mac->mdio_bus);
8776
8777         hclge_uninit_umv_space(hdev);
8778
8779         /* Disable MISC vector(vector0) */
8780         hclge_enable_vector(&hdev->misc_vector, false);
8781         synchronize_irq(hdev->misc_vector.vector_irq);
8782
8783         /* Disable all hw interrupts */
8784         hclge_config_mac_tnl_int(hdev, false);
8785         hclge_config_nic_hw_error(hdev, false);
8786         hclge_config_rocee_ras_interrupt(hdev, false);
8787
8788         hclge_cmd_uninit(hdev);
8789         hclge_misc_irq_uninit(hdev);
8790         hclge_pci_uninit(hdev);
8791         mutex_destroy(&hdev->vport_lock);
8792         hclge_uninit_vport_mac_table(hdev);
8793         hclge_uninit_vport_vlan_table(hdev);
8794         mutex_destroy(&hdev->vport_cfg_mutex);
8795         ae_dev->priv = NULL;
8796 }
8797
8798 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8799 {
8800         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8801         struct hclge_vport *vport = hclge_get_vport(handle);
8802         struct hclge_dev *hdev = vport->back;
8803
8804         return min_t(u32, hdev->rss_size_max,
8805                      vport->alloc_tqps / kinfo->num_tc);
8806 }
8807
8808 static void hclge_get_channels(struct hnae3_handle *handle,
8809                                struct ethtool_channels *ch)
8810 {
8811         ch->max_combined = hclge_get_max_channels(handle);
8812         ch->other_count = 1;
8813         ch->max_other = 1;
8814         ch->combined_count = handle->kinfo.rss_size;
8815 }
8816
8817 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8818                                         u16 *alloc_tqps, u16 *max_rss_size)
8819 {
8820         struct hclge_vport *vport = hclge_get_vport(handle);
8821         struct hclge_dev *hdev = vport->back;
8822
8823         *alloc_tqps = vport->alloc_tqps;
8824         *max_rss_size = hdev->rss_size_max;
8825 }
8826
8827 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8828                               bool rxfh_configured)
8829 {
8830         struct hclge_vport *vport = hclge_get_vport(handle);
8831         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8832         struct hclge_dev *hdev = vport->back;
8833         int cur_rss_size = kinfo->rss_size;
8834         int cur_tqps = kinfo->num_tqps;
8835         u16 tc_offset[HCLGE_MAX_TC_NUM];
8836         u16 tc_valid[HCLGE_MAX_TC_NUM];
8837         u16 tc_size[HCLGE_MAX_TC_NUM];
8838         u16 roundup_size;
8839         u32 *rss_indir;
8840         int ret, i;
8841
8842         kinfo->req_rss_size = new_tqps_num;
8843
8844         ret = hclge_tm_vport_map_update(hdev);
8845         if (ret) {
8846                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8847                 return ret;
8848         }
8849
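             /* tc_size holds log2 of rss_size rounded up to a power of two */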
8850         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8851         roundup_size = ilog2(roundup_size);
8852         /* Set the RSS TC mode according to the new RSS size */
8853         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8854                 tc_valid[i] = 0;
8855
8856                 if (!(hdev->hw_tc_map & BIT(i)))
8857                         continue;
8858
8859                 tc_valid[i] = 1;
8860                 tc_size[i] = roundup_size;
8861                 tc_offset[i] = kinfo->rss_size * i;
8862         }
8863         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8864         if (ret)
8865                 return ret;
8866
8867         /* RSS indirection table has been configured by user */
8868         if (rxfh_configured)
8869                 goto out;
8870
8871         /* Reinitialize the RSS indirection table for the new RSS size */
8872         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8873         if (!rss_indir)
8874                 return -ENOMEM;
8875
8876         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8877                 rss_indir[i] = i % kinfo->rss_size;
8878
8879         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8880         if (ret)
8881                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8882                         ret);
8883
8884         kfree(rss_indir);
8885
8886 out:
8887         if (!ret)
8888                 dev_info(&hdev->pdev->dev,
8889                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8890                          cur_rss_size, kinfo->rss_size,
8891                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8892
8893         return ret;
8894 }
8895
8896 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8897                               u32 *regs_num_64_bit)
8898 {
8899         struct hclge_desc desc;
8900         u32 total_num;
8901         int ret;
8902
8903         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8905         if (ret) {
8906                 dev_err(&hdev->pdev->dev,
8907                         "Query register number cmd failed, ret = %d.\n", ret);
8908                 return ret;
8909         }
8910
8911         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8912         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8913
8914         total_num = *regs_num_32_bit + *regs_num_64_bit;
8915         if (!total_num)
8916                 return -EINVAL;
8917
8918         return 0;
8919 }
8920
8921 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8922                                  void *data)
8923 {
8924 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8925 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8926
8927         struct hclge_desc *desc;
8928         u32 *reg_val = data;
8929         __le32 *desc_data;
8930         int nodata_num;
8931         int cmd_num;
8932         int i, k, n;
8933         int ret;
8934
8935         if (regs_num == 0)
8936                 return 0;
8937
8938         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8939         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8940                                HCLGE_32_BIT_REG_RTN_DATANUM);
8941         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8942         if (!desc)
8943                 return -ENOMEM;
8944
8945         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8946         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8947         if (ret) {
8948                 dev_err(&hdev->pdev->dev,
8949                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8950                 kfree(desc);
8951                 return ret;
8952         }
8953
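             /* the head of the first descriptor holds nodata_num of the
              * HCLGE_32_BIT_REG_RTN_DATANUM words, so it carries fewer
              * register values than the following descriptors
              */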
8954         for (i = 0; i < cmd_num; i++) {
8955                 if (i == 0) {
8956                         desc_data = (__le32 *)(&desc[i].data[0]);
8957                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8958                 } else {
8959                         desc_data = (__le32 *)(&desc[i]);
8960                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8961                 }
8962                 for (k = 0; k < n; k++) {
8963                         *reg_val++ = le32_to_cpu(*desc_data++);
8964
8965                         regs_num--;
8966                         if (!regs_num)
8967                                 break;
8968                 }
8969         }
8970
8971         kfree(desc);
8972         return 0;
8973 }
8974
8975 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8976                                  void *data)
8977 {
8978 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8979 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
8980
8981         struct hclge_desc *desc;
8982         u64 *reg_val = data;
8983         __le64 *desc_data;
8984         int nodata_len;
8985         int cmd_num;
8986         int i, k, n;
8987         int ret;
8988
8989         if (regs_num == 0)
8990                 return 0;
8991
8992         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
8993         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
8994                                HCLGE_64_BIT_REG_RTN_DATANUM);
8995         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8996         if (!desc)
8997                 return -ENOMEM;
8998
8999         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9000         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9001         if (ret) {
9002                 dev_err(&hdev->pdev->dev,
9003                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9004                 kfree(desc);
9005                 return ret;
9006         }
9007
9008         for (i = 0; i < cmd_num; i++) {
9009                 if (i == 0) {
9010                         desc_data = (__le64 *)(&desc[i].data[0]);
9011                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9012                 } else {
9013                         desc_data = (__le64 *)(&desc[i]);
9014                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9015                 }
9016                 for (k = 0; k < n; k++) {
9017                         *reg_val++ = le64_to_cpu(*desc_data++);
9018
9019                         regs_num--;
9020                         if (!regs_num)
9021                                 break;
9022                 }
9023         }
9024
9025         kfree(desc);
9026         return 0;
9027 }
9028
9029 #define MAX_SEPARATE_NUM        4
9030 #define SEPARATOR_VALUE         0xFFFFFFFF
9031 #define REG_NUM_PER_LINE        4
9032 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9033
9034 static int hclge_get_regs_len(struct hnae3_handle *handle)
9035 {
9036         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9037         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9038         struct hclge_vport *vport = hclge_get_vport(handle);
9039         struct hclge_dev *hdev = vport->back;
9040         u32 regs_num_32_bit, regs_num_64_bit;
9041         int ret;
9042
9043         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9044         if (ret) {
9045                 dev_err(&hdev->pdev->dev,
9046                         "Get register number failed, ret = %d.\n", ret);
9047                 return -EOPNOTSUPP;
9048         }
9049
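             /* each register block is padded with separator words up to a
              * whole line, hence the extra line reserved per block
              */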
9050         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9051         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9052         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9053         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9054
9055         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9056                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9057                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9058 }
9059
9060 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9061                            void *data)
9062 {
9063         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9064         struct hclge_vport *vport = hclge_get_vport(handle);
9065         struct hclge_dev *hdev = vport->back;
9066         u32 regs_num_32_bit, regs_num_64_bit;
9067         int i, j, reg_um, separator_num;
9068         u32 *reg = data;
9069         int ret;
9070
9071         *version = hdev->fw_version;
9072
9073         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9074         if (ret) {
9075                 dev_err(&hdev->pdev->dev,
9076                         "Get register number failed, ret = %d.\n", ret);
9077                 return;
9078         }
9079
9080         /* fetch per-PF register values from PF PCIe register space */
9081         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9082         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9083         for (i = 0; i < reg_um; i++)
9084                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9085         for (i = 0; i < separator_num; i++)
9086                 *reg++ = SEPARATOR_VALUE;
9087
9088         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9089         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9090         for (i = 0; i < reg_um; i++)
9091                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9092         for (i = 0; i < separator_num; i++)
9093                 *reg++ = SEPARATOR_VALUE;
9094
9095         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9096         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9097         for (j = 0; j < kinfo->num_tqps; j++) {
9098                 for (i = 0; i < reg_um; i++)
9099                         *reg++ = hclge_read_dev(&hdev->hw,
9100                                                 ring_reg_addr_list[i] +
9101                                                 0x200 * j);
9102                 for (i = 0; i < separator_num; i++)
9103                         *reg++ = SEPARATOR_VALUE;
9104         }
9105
9106         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9107         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9108         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9109                 for (i = 0; i < reg_um; i++)
9110                         *reg++ = hclge_read_dev(&hdev->hw,
9111                                                 tqp_intr_reg_addr_list[i] +
9112                                                 4 * j);
9113                 for (i = 0; i < separator_num; i++)
9114                         *reg++ = SEPARATOR_VALUE;
9115         }
9116
9117         /* fetch PF common register values from the firmware */
9118         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9119         if (ret) {
9120                 dev_err(&hdev->pdev->dev,
9121                         "Get 32 bit register failed, ret = %d.\n", ret);
9122                 return;
9123         }
9124
9125         reg += regs_num_32_bit;
9126         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9127         if (ret)
9128                 dev_err(&hdev->pdev->dev,
9129                         "Get 64 bit register failed, ret = %d.\n", ret);
9130 }
9131
9132 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9133 {
9134         struct hclge_set_led_state_cmd *req;
9135         struct hclge_desc desc;
9136         int ret;
9137
9138         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9139
9140         req = (struct hclge_set_led_state_cmd *)desc.data;
9141         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9142                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9143
9144         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9145         if (ret)
9146                 dev_err(&hdev->pdev->dev,
9147                         "Send set led state cmd error, ret =%d\n", ret);
9148
9149         return ret;
9150 }
9151
9152 enum hclge_led_status {
9153         HCLGE_LED_OFF,
9154         HCLGE_LED_ON,
9155         HCLGE_LED_NO_CHANGE = 0xFF,
9156 };
9157
9158 static int hclge_set_led_id(struct hnae3_handle *handle,
9159                             enum ethtool_phys_id_state status)
9160 {
9161         struct hclge_vport *vport = hclge_get_vport(handle);
9162         struct hclge_dev *hdev = vport->back;
9163
9164         switch (status) {
9165         case ETHTOOL_ID_ACTIVE:
9166                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9167         case ETHTOOL_ID_INACTIVE:
9168                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9169         default:
9170                 return -EINVAL;
9171         }
9172 }
9173
9174 static void hclge_get_link_mode(struct hnae3_handle *handle,
9175                                 unsigned long *supported,
9176                                 unsigned long *advertising)
9177 {
9178         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9179         struct hclge_vport *vport = hclge_get_vport(handle);
9180         struct hclge_dev *hdev = vport->back;
9181         unsigned int idx = 0;
9182
9183         for (; idx < size; idx++) {
9184                 supported[idx] = hdev->hw.mac.supported[idx];
9185                 advertising[idx] = hdev->hw.mac.advertising[idx];
9186         }
9187 }
9188
9189 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9190 {
9191         struct hclge_vport *vport = hclge_get_vport(handle);
9192         struct hclge_dev *hdev = vport->back;
9193
9194         return hclge_config_gro(hdev, enable);
9195 }
9196
9197 static const struct hnae3_ae_ops hclge_ops = {
9198         .init_ae_dev = hclge_init_ae_dev,
9199         .uninit_ae_dev = hclge_uninit_ae_dev,
9200         .flr_prepare = hclge_flr_prepare,
9201         .flr_done = hclge_flr_done,
9202         .init_client_instance = hclge_init_client_instance,
9203         .uninit_client_instance = hclge_uninit_client_instance,
9204         .map_ring_to_vector = hclge_map_ring_to_vector,
9205         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9206         .get_vector = hclge_get_vector,
9207         .put_vector = hclge_put_vector,
9208         .set_promisc_mode = hclge_set_promisc_mode,
9209         .set_loopback = hclge_set_loopback,
9210         .start = hclge_ae_start,
9211         .stop = hclge_ae_stop,
9212         .client_start = hclge_client_start,
9213         .client_stop = hclge_client_stop,
9214         .get_status = hclge_get_status,
9215         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9216         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9217         .get_media_type = hclge_get_media_type,
9218         .check_port_speed = hclge_check_port_speed,
9219         .get_fec = hclge_get_fec,
9220         .set_fec = hclge_set_fec,
9221         .get_rss_key_size = hclge_get_rss_key_size,
9222         .get_rss_indir_size = hclge_get_rss_indir_size,
9223         .get_rss = hclge_get_rss,
9224         .set_rss = hclge_set_rss,
9225         .set_rss_tuple = hclge_set_rss_tuple,
9226         .get_rss_tuple = hclge_get_rss_tuple,
9227         .get_tc_size = hclge_get_tc_size,
9228         .get_mac_addr = hclge_get_mac_addr,
9229         .set_mac_addr = hclge_set_mac_addr,
9230         .do_ioctl = hclge_do_ioctl,
9231         .add_uc_addr = hclge_add_uc_addr,
9232         .rm_uc_addr = hclge_rm_uc_addr,
9233         .add_mc_addr = hclge_add_mc_addr,
9234         .rm_mc_addr = hclge_rm_mc_addr,
9235         .set_autoneg = hclge_set_autoneg,
9236         .get_autoneg = hclge_get_autoneg,
9237         .restart_autoneg = hclge_restart_autoneg,
9238         .get_pauseparam = hclge_get_pauseparam,
9239         .set_pauseparam = hclge_set_pauseparam,
9240         .set_mtu = hclge_set_mtu,
9241         .reset_queue = hclge_reset_tqp,
9242         .get_stats = hclge_get_stats,
9243         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9244         .update_stats = hclge_update_stats,
9245         .get_strings = hclge_get_strings,
9246         .get_sset_count = hclge_get_sset_count,
9247         .get_fw_version = hclge_get_fw_version,
9248         .get_mdix_mode = hclge_get_mdix_mode,
9249         .enable_vlan_filter = hclge_enable_vlan_filter,
9250         .set_vlan_filter = hclge_set_vlan_filter,
9251         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9252         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9253         .reset_event = hclge_reset_event,
9254         .set_default_reset_request = hclge_set_def_reset_request,
9255         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9256         .set_channels = hclge_set_channels,
9257         .get_channels = hclge_get_channels,
9258         .get_regs_len = hclge_get_regs_len,
9259         .get_regs = hclge_get_regs,
9260         .set_led_id = hclge_set_led_id,
9261         .get_link_mode = hclge_get_link_mode,
9262         .add_fd_entry = hclge_add_fd_entry,
9263         .del_fd_entry = hclge_del_fd_entry,
9264         .del_all_fd_entries = hclge_del_all_fd_entries,
9265         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9266         .get_fd_rule_info = hclge_get_fd_rule_info,
9267         .get_fd_all_rules = hclge_get_all_rules,
9268         .restore_fd_rules = hclge_restore_fd_entries,
9269         .enable_fd = hclge_enable_fd,
9270         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9271         .dbg_run_cmd = hclge_dbg_run_cmd,
9272         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9273         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9274         .ae_dev_resetting = hclge_ae_dev_resetting,
9275         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9276         .set_gro_en = hclge_gro_en,
9277         .get_global_queue_id = hclge_covert_handle_qid_global,
9278         .set_timer_task = hclge_set_timer_task,
9279         .mac_connect_phy = hclge_mac_connect_phy,
9280         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9281         .restore_vlan_table = hclge_restore_vlan_table,
9282 };
9283
9284 static struct hnae3_ae_algo ae_algo = {
9285         .ops = &hclge_ops,
9286         .pdev_id_table = ae_algo_pci_tbl,
9287 };
9288
9289 static int hclge_init(void)
9290 {
9291         pr_info("%s is initializing\n", HCLGE_NAME);
9292
9293         hnae3_register_ae_algo(&ae_algo);
9294
9295         return 0;
9296 }
9297
9298 static void hclge_exit(void)
9299 {
9300         hnae3_unregister_ae_algo(&ae_algo);
9301 }
9302 module_init(hclge_init);
9303 module_exit(hclge_exit);
9304
9305 MODULE_LICENSE("GPL");
9306 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9307 MODULE_DESCRIPTION("HCLGE Driver");
9308 MODULE_VERSION(HCLGE_MOD_VERSION);