net: hns3: some variable modification
linux-2.6-microblaze.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33
34 #define HCLGE_RESET_MAX_FAIL_CNT        5
35
36 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
40 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
41                                u16 *allocated_size, bool is_alloc);
42 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
43 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
44 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
45                                                    unsigned long *addr);
46
47 static struct hnae3_ae_algo ae_algo;
48
49 static const struct pci_device_id ae_algo_pci_tbl[] = {
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
51         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
53         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
55         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
56         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
57         /* required last entry */
58         {0, }
59 };
60
61 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
62
63 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
64                                          HCLGE_CMDQ_TX_ADDR_H_REG,
65                                          HCLGE_CMDQ_TX_DEPTH_REG,
66                                          HCLGE_CMDQ_TX_TAIL_REG,
67                                          HCLGE_CMDQ_TX_HEAD_REG,
68                                          HCLGE_CMDQ_RX_ADDR_L_REG,
69                                          HCLGE_CMDQ_RX_ADDR_H_REG,
70                                          HCLGE_CMDQ_RX_DEPTH_REG,
71                                          HCLGE_CMDQ_RX_TAIL_REG,
72                                          HCLGE_CMDQ_RX_HEAD_REG,
73                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
74                                          HCLGE_CMDQ_INTR_STS_REG,
75                                          HCLGE_CMDQ_INTR_EN_REG,
76                                          HCLGE_CMDQ_INTR_GEN_REG};
77
78 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
79                                            HCLGE_VECTOR0_OTER_EN_REG,
80                                            HCLGE_MISC_RESET_STS_REG,
81                                            HCLGE_MISC_VECTOR_INT_STS,
82                                            HCLGE_GLOBAL_RESET_REG,
83                                            HCLGE_FUN_RST_ING,
84                                            HCLGE_GRO_EN_REG};
85
86 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
87                                          HCLGE_RING_RX_ADDR_H_REG,
88                                          HCLGE_RING_RX_BD_NUM_REG,
89                                          HCLGE_RING_RX_BD_LENGTH_REG,
90                                          HCLGE_RING_RX_MERGE_EN_REG,
91                                          HCLGE_RING_RX_TAIL_REG,
92                                          HCLGE_RING_RX_HEAD_REG,
93                                          HCLGE_RING_RX_FBD_NUM_REG,
94                                          HCLGE_RING_RX_OFFSET_REG,
95                                          HCLGE_RING_RX_FBD_OFFSET_REG,
96                                          HCLGE_RING_RX_STASH_REG,
97                                          HCLGE_RING_RX_BD_ERR_REG,
98                                          HCLGE_RING_TX_ADDR_L_REG,
99                                          HCLGE_RING_TX_ADDR_H_REG,
100                                          HCLGE_RING_TX_BD_NUM_REG,
101                                          HCLGE_RING_TX_PRIORITY_REG,
102                                          HCLGE_RING_TX_TC_REG,
103                                          HCLGE_RING_TX_MERGE_EN_REG,
104                                          HCLGE_RING_TX_TAIL_REG,
105                                          HCLGE_RING_TX_HEAD_REG,
106                                          HCLGE_RING_TX_FBD_NUM_REG,
107                                          HCLGE_RING_TX_OFFSET_REG,
108                                          HCLGE_RING_TX_EBD_NUM_REG,
109                                          HCLGE_RING_TX_EBD_OFFSET_REG,
110                                          HCLGE_RING_TX_BD_ERR_REG,
111                                          HCLGE_RING_EN_REG};
112
113 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
114                                              HCLGE_TQP_INTR_GL0_REG,
115                                              HCLGE_TQP_INTR_GL1_REG,
116                                              HCLGE_TQP_INTR_GL2_REG,
117                                              HCLGE_TQP_INTR_RL_REG};
118
119 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
120         "App    Loopback test",
121         "Serdes serial Loopback test",
122         "Serdes parallel Loopback test",
123         "Phy    Loopback test"
124 };
125
126 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
127         {"mac_tx_mac_pause_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
129         {"mac_rx_mac_pause_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
131         {"mac_tx_control_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
133         {"mac_rx_control_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
135         {"mac_tx_pfc_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
137         {"mac_tx_pfc_pri0_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
139         {"mac_tx_pfc_pri1_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
141         {"mac_tx_pfc_pri2_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
143         {"mac_tx_pfc_pri3_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
145         {"mac_tx_pfc_pri4_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
147         {"mac_tx_pfc_pri5_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
149         {"mac_tx_pfc_pri6_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
151         {"mac_tx_pfc_pri7_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
153         {"mac_rx_pfc_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
155         {"mac_rx_pfc_pri0_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
157         {"mac_rx_pfc_pri1_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
159         {"mac_rx_pfc_pri2_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
161         {"mac_rx_pfc_pri3_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
163         {"mac_rx_pfc_pri4_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
165         {"mac_rx_pfc_pri5_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
167         {"mac_rx_pfc_pri6_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
169         {"mac_rx_pfc_pri7_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
171         {"mac_tx_total_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
173         {"mac_tx_total_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
175         {"mac_tx_good_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
177         {"mac_tx_bad_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
179         {"mac_tx_good_oct_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
181         {"mac_tx_bad_oct_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
183         {"mac_tx_uni_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
185         {"mac_tx_multi_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
187         {"mac_tx_broad_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
189         {"mac_tx_undersize_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
191         {"mac_tx_oversize_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
193         {"mac_tx_64_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
195         {"mac_tx_65_127_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
197         {"mac_tx_128_255_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
199         {"mac_tx_256_511_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
201         {"mac_tx_512_1023_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
203         {"mac_tx_1024_1518_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
205         {"mac_tx_1519_2047_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
207         {"mac_tx_2048_4095_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
209         {"mac_tx_4096_8191_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
211         {"mac_tx_8192_9216_oct_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
213         {"mac_tx_9217_12287_oct_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
215         {"mac_tx_12288_16383_oct_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
217         {"mac_tx_1519_max_good_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
219         {"mac_tx_1519_max_bad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
221         {"mac_rx_total_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
223         {"mac_rx_total_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
225         {"mac_rx_good_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
227         {"mac_rx_bad_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
229         {"mac_rx_good_oct_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
231         {"mac_rx_bad_oct_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
233         {"mac_rx_uni_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
235         {"mac_rx_multi_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
237         {"mac_rx_broad_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
239         {"mac_rx_undersize_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
241         {"mac_rx_oversize_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
243         {"mac_rx_64_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
245         {"mac_rx_65_127_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
247         {"mac_rx_128_255_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
249         {"mac_rx_256_511_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
251         {"mac_rx_512_1023_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
253         {"mac_rx_1024_1518_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
255         {"mac_rx_1519_2047_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
257         {"mac_rx_2048_4095_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
259         {"mac_rx_4096_8191_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
261         {"mac_rx_8192_9216_oct_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
263         {"mac_rx_9217_12287_oct_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
265         {"mac_rx_12288_16383_oct_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
267         {"mac_rx_1519_max_good_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
269         {"mac_rx_1519_max_bad_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
271
272         {"mac_tx_fragment_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
274         {"mac_tx_undermin_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
276         {"mac_tx_jabber_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
278         {"mac_tx_err_all_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
280         {"mac_tx_from_app_good_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
282         {"mac_tx_from_app_bad_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
284         {"mac_rx_fragment_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
286         {"mac_rx_undermin_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
288         {"mac_rx_jabber_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
290         {"mac_rx_fcs_err_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
292         {"mac_rx_send_app_good_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
294         {"mac_rx_send_app_bad_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
296 };
297
298 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
299         {
300                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
301                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
302                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
303                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
304                 .i_port_bitmap = 0x1,
305         },
306 };
307
308 static const u8 hclge_hash_key[] = {
309         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
310         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
311         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
312         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
313         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
314 };
315
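/* Read the MAC statistics using the fixed HCLGE_MAC_CMD_NUM descriptor
 * layout and accumulate the 64-bit counters into hdev->hw_stats.mac_stats.
 * Used as a fallback when the firmware cannot report the number of
 * statistics registers (see hclge_mac_update_stats()).
 */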
316 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
317 {
318 #define HCLGE_MAC_CMD_NUM 21
319
320         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
321         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
322         __le64 *desc_data;
323         int i, k, n;
324         int ret;
325
326         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
327         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
328         if (ret) {
329                 dev_err(&hdev->pdev->dev,
330                         "Get MAC pkt stats fail, status = %d.\n", ret);
331
332                 return ret;
333         }
334
335         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
336                 /* for special opcode 0032, only the first desc has the head */
337                 if (unlikely(i == 0)) {
338                         desc_data = (__le64 *)(&desc[i].data[0]);
339                         n = HCLGE_RD_FIRST_STATS_NUM;
340                 } else {
341                         desc_data = (__le64 *)(&desc[i]);
342                         n = HCLGE_RD_OTHER_STATS_NUM;
343                 }
344
345                 for (k = 0; k < n; k++) {
346                         *data += le64_to_cpu(*desc_data);
347                         data++;
348                         desc_data++;
349                 }
350         }
351
352         return 0;
353 }
354
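/* Read the MAC statistics when the firmware reports how many descriptors
 * are needed (desc_num from hclge_mac_query_reg_num()), allocating the
 * descriptor array dynamically and accumulating the counters into
 * hdev->hw_stats.mac_stats.
 */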
355 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
356 {
357         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
358         struct hclge_desc *desc;
359         __le64 *desc_data;
360         u16 i, k, n;
361         int ret;
362
363         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
364         if (!desc)
365                 return -ENOMEM;
366         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
367         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
368         if (ret) {
369                 kfree(desc);
370                 return ret;
371         }
372
373         for (i = 0; i < desc_num; i++) {
374                 /* for special opcode 0034, only the first desc has the head */
375                 if (i == 0) {
376                         desc_data = (__le64 *)(&desc[i].data[0]);
377                         n = HCLGE_RD_FIRST_STATS_NUM;
378                 } else {
379                         desc_data = (__le64 *)(&desc[i]);
380                         n = HCLGE_RD_OTHER_STATS_NUM;
381                 }
382
383                 for (k = 0; k < n; k++) {
384                         *data += le64_to_cpu(*desc_data);
385                         data++;
386                         desc_data++;
387                 }
388         }
389
390         kfree(desc);
391
392         return 0;
393 }
394
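/* Query how many MAC statistics registers the firmware exposes and work out
 * how many command descriptors are needed to read them all; the first
 * descriptor holds fewer stats than the following ones, hence the rounding.
 */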
395 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
396 {
397         struct hclge_desc desc;
398         __le32 *desc_data;
399         u32 reg_num;
400         int ret;
401
402         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
403         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
404         if (ret)
405                 return ret;
406
407         desc_data = (__le32 *)(&desc.data[0]);
408         reg_num = le32_to_cpu(*desc_data);
409
410         *desc_num = 1 + ((reg_num - 3) >> 2) +
411                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
412
413         return 0;
414 }
415
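/* Prefer the firmware-reported descriptor count; fall back to the fixed
 * descriptor layout when querying the register number returns -EOPNOTSUPP.
 */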
416 static int hclge_mac_update_stats(struct hclge_dev *hdev)
417 {
418         u32 desc_num;
419         int ret;
420
421         ret = hclge_mac_query_reg_num(hdev, &desc_num);
422
423         /* The firmware supports the new statistics acquisition method */
424         if (!ret)
425                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
426         else if (ret == -EOPNOTSUPP)
427                 ret = hclge_mac_update_stats_defective(hdev);
428         else
429                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
430
431         return ret;
432 }
433
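/* Query the RX and then the TX packet counter of every TQP owned by this
 * handle, one command per queue, and accumulate the results into the
 * per-TQP statistics.
 */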
434 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
435 {
436         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
437         struct hclge_vport *vport = hclge_get_vport(handle);
438         struct hclge_dev *hdev = vport->back;
439         struct hnae3_queue *queue;
440         struct hclge_desc desc[1];
441         struct hclge_tqp *tqp;
442         int ret, i;
443
444         for (i = 0; i < kinfo->num_tqps; i++) {
445                 queue = handle->kinfo.tqp[i];
446                 tqp = container_of(queue, struct hclge_tqp, q);
447                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
448                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
449                                            true);
450
451                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
452                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
453                 if (ret) {
454                         dev_err(&hdev->pdev->dev,
455                                 "Query tqp stat fail, status = %d, queue = %d\n",
456                                 ret, i);
457                         return ret;
458                 }
459                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
460                         le32_to_cpu(desc[0].data[1]);
461         }
462
463         for (i = 0; i < kinfo->num_tqps; i++) {
464                 queue = handle->kinfo.tqp[i];
465                 tqp = container_of(queue, struct hclge_tqp, q);
466                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
467                 hclge_cmd_setup_basic_desc(&desc[0],
468                                            HCLGE_OPC_QUERY_TX_STATUS,
469                                            true);
470
471                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
472                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
473                 if (ret) {
474                         dev_err(&hdev->pdev->dev,
475                                 "Query tqp stat fail, status = %d, queue = %d\n",
476                                 ret, i);
477                         return ret;
478                 }
479                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
480                         le32_to_cpu(desc[0].data[1]);
481         }
482
483         return 0;
484 }
485
486 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
487 {
488         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
489         struct hclge_tqp *tqp;
490         u64 *buff = data;
491         int i;
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
496         }
497
498         for (i = 0; i < kinfo->num_tqps; i++) {
499                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
500                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
501         }
502
503         return buff;
504 }
505
506 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
507 {
508         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509
510         /* each tqp has one TX queue and one RX queue */
511         return kinfo->num_tqps * 2;
512 }
513
514 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
515 {
516         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
517         u8 *buff = data;
518         int i = 0;
519
520         for (i = 0; i < kinfo->num_tqps; i++) {
521                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
522                         struct hclge_tqp, q);
523                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
524                          tqp->index);
525                 buff = buff + ETH_GSTRING_LEN;
526         }
527
528         for (i = 0; i < kinfo->num_tqps; i++) {
529                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
530                         struct hclge_tqp, q);
531                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
532                          tqp->index);
533                 buff = buff + ETH_GSTRING_LEN;
534         }
535
536         return buff;
537 }
538
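/* Copy 'size' counters out of a statistics structure into the ethtool data
 * buffer, using the offsets recorded in the string/offset table.
 */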
539 static u64 *hclge_comm_get_stats(const void *comm_stats,
540                                  const struct hclge_comm_stats_str strs[],
541                                  int size, u64 *data)
542 {
543         u64 *buf = data;
544         u32 i;
545
546         for (i = 0; i < size; i++)
547                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
548
549         return buf + size;
550 }
551
552 static u8 *hclge_comm_get_strings(u32 stringset,
553                                   const struct hclge_comm_stats_str strs[],
554                                   int size, u8 *data)
555 {
556         char *buff = (char *)data;
557         u32 i;
558
559         if (stringset != ETH_SS_STATS)
560                 return buff;
561
562         for (i = 0; i < size; i++) {
563                 snprintf(buff, ETH_GSTRING_LEN,
564                          "%s", strs[i].desc);
565                 buff = buff + ETH_GSTRING_LEN;
566         }
567
568         return (u8 *)buff;
569 }
570
571 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
572 {
573         struct hnae3_handle *handle;
574         int status;
575
576         handle = &hdev->vport[0].nic;
577         if (handle->client) {
578                 status = hclge_tqps_update_stats(handle);
579                 if (status) {
580                         dev_err(&hdev->pdev->dev,
581                                 "Update TQPS stats fail, status = %d.\n",
582                                 status);
583                 }
584         }
585
586         status = hclge_mac_update_stats(hdev);
587         if (status)
588                 dev_err(&hdev->pdev->dev,
589                         "Update MAC stats fail, status = %d.\n", status);
590 }
591
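/* Refresh the MAC and TQP statistics; the HCLGE_STATE_STATISTICS_UPDATING
 * bit guards against concurrent updates.
 */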
592 static void hclge_update_stats(struct hnae3_handle *handle,
593                                struct net_device_stats *net_stats)
594 {
595         struct hclge_vport *vport = hclge_get_vport(handle);
596         struct hclge_dev *hdev = vport->back;
597         int status;
598
599         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
600                 return;
601
602         status = hclge_mac_update_stats(hdev);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update MAC stats fail, status = %d.\n",
606                         status);
607
608         status = hclge_tqps_update_stats(handle);
609         if (status)
610                 dev_err(&hdev->pdev->dev,
611                         "Update TQPS stats fail, status = %d.\n",
612                         status);
613
614         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
615 }
616
617 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
618 {
619 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
620                 HNAE3_SUPPORT_PHY_LOOPBACK |\
621                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
622                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
623
624         struct hclge_vport *vport = hclge_get_vport(handle);
625         struct hclge_dev *hdev = vport->back;
626         int count = 0;
627
628         /* Loopback test support rules:
629          * mac: only supported in GE mode
630          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
631          * phy: only supported when a PHY device exists on the board
632          */
633         if (stringset == ETH_SS_TEST) {
634                 /* clear loopback bit flags at first */
635                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
636                 if (hdev->pdev->revision >= 0x21 ||
637                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
638                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
639                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
640                         count += 1;
641                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
642                 }
643
644                 count += 2;
645                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
646                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
647         } else if (stringset == ETH_SS_STATS) {
648                 count = ARRAY_SIZE(g_mac_stats_string) +
649                         hclge_tqps_get_sset_count(handle, stringset);
650         }
651
652         return count;
653 }
654
655 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
656                               u8 *data)
657 {
658         u8 *p = data;
659         int size;
660
661         if (stringset == ETH_SS_STATS) {
662                 size = ARRAY_SIZE(g_mac_stats_string);
663                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
664                                            size, p);
665                 p = hclge_tqps_get_strings(handle, p);
666         } else if (stringset == ETH_SS_TEST) {
667                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
668                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
669                                ETH_GSTRING_LEN);
670                         p += ETH_GSTRING_LEN;
671                 }
672                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
673                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
674                                ETH_GSTRING_LEN);
675                         p += ETH_GSTRING_LEN;
676                 }
677                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
678                         memcpy(p,
679                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
680                                ETH_GSTRING_LEN);
681                         p += ETH_GSTRING_LEN;
682                 }
683                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
684                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
698                                  ARRAY_SIZE(g_mac_stats_string), data);
699         p = hclge_tqps_get_stats(handle, p);
700 }
701
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703                                      u64 *rx_cnt)
704 {
705         struct hclge_vport *vport = hclge_get_vport(handle);
706         struct hclge_dev *hdev = vport->back;
707
708         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710 }
711
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713                                    struct hclge_func_status_cmd *status)
714 {
715         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716                 return -EINVAL;
717
718         /* Set the pf to main pf */
719         if (status->pf_state & HCLGE_PF_STATE_MAIN)
720                 hdev->flag |= HCLGE_FLAG_MAIN;
721         else
722                 hdev->flag &= ~HCLGE_FLAG_MAIN;
723
724         return 0;
725 }
726
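/* Poll the function status up to HCLGE_QUERY_MAX_CNT times until the PF
 * state becomes non-zero (PF reset finished), then record whether this PF
 * is the main PF.
 */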
727 static int hclge_query_function_status(struct hclge_dev *hdev)
728 {
729 #define HCLGE_QUERY_MAX_CNT     5
730
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n", ret);
744                         return ret;
745                 }
746
747                 /* Check if PF reset is done */
748                 if (req->pf_state)
749                         break;
750                 usleep_range(1000, 2000);
751         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
752
753         ret = hclge_parse_func_status(hdev, req);
754
755         return ret;
756 }
757
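/* Query the PF resources: the TQP count, the packet buffer size, the TX and
 * DV buffer sizes (rounded up to HCLGE_BUF_SIZE_UNIT) and the MSI-X vector
 * budget, including the RoCE vector offset when the device supports RoCE.
 */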
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
759 {
760         struct hclge_pf_res_cmd *req;
761         struct hclge_desc desc;
762         int ret;
763
764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766         if (ret) {
767                 dev_err(&hdev->pdev->dev,
768                         "query pf resource failed %d.\n", ret);
769                 return ret;
770         }
771
772         req = (struct hclge_pf_res_cmd *)desc.data;
773         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775
776         if (req->tx_buf_size)
777                 hdev->tx_buf_size =
778                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779         else
780                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781
782         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783
784         if (req->dv_buf_size)
785                 hdev->dv_buf_size =
786                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787         else
788                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789
790         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791
792         if (hnae3_dev_roce_supported(hdev)) {
793                 hdev->roce_base_msix_offset =
794                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796                 hdev->num_roce_msi =
797                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799
800                 /* The PF should have both NIC vectors and RoCE vectors;
801                  * the NIC vectors are queued before the RoCE vectors.
802                  */
803                 hdev->num_msi = hdev->num_roce_msi +
804                                 hdev->roce_base_msix_offset;
805         } else {
806                 hdev->num_msi =
807                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809         }
810
811         return 0;
812 }
813
814 static int hclge_parse_speed(int speed_cmd, int *speed)
815 {
816         switch (speed_cmd) {
817         case 6:
818                 *speed = HCLGE_MAC_SPEED_10M;
819                 break;
820         case 7:
821                 *speed = HCLGE_MAC_SPEED_100M;
822                 break;
823         case 0:
824                 *speed = HCLGE_MAC_SPEED_1G;
825                 break;
826         case 1:
827                 *speed = HCLGE_MAC_SPEED_10G;
828                 break;
829         case 2:
830                 *speed = HCLGE_MAC_SPEED_25G;
831                 break;
832         case 3:
833                 *speed = HCLGE_MAC_SPEED_40G;
834                 break;
835         case 4:
836                 *speed = HCLGE_MAC_SPEED_50G;
837                 break;
838         case 5:
839                 *speed = HCLGE_MAC_SPEED_100G;
840                 break;
841         default:
842                 return -EINVAL;
843         }
844
845         return 0;
846 }
847
848 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
849 {
850         struct hclge_vport *vport = hclge_get_vport(handle);
851         struct hclge_dev *hdev = vport->back;
852         u32 speed_ability = hdev->hw.mac.speed_ability;
853         u32 speed_bit = 0;
854
855         switch (speed) {
856         case HCLGE_MAC_SPEED_10M:
857                 speed_bit = HCLGE_SUPPORT_10M_BIT;
858                 break;
859         case HCLGE_MAC_SPEED_100M:
860                 speed_bit = HCLGE_SUPPORT_100M_BIT;
861                 break;
862         case HCLGE_MAC_SPEED_1G:
863                 speed_bit = HCLGE_SUPPORT_1G_BIT;
864                 break;
865         case HCLGE_MAC_SPEED_10G:
866                 speed_bit = HCLGE_SUPPORT_10G_BIT;
867                 break;
868         case HCLGE_MAC_SPEED_25G:
869                 speed_bit = HCLGE_SUPPORT_25G_BIT;
870                 break;
871         case HCLGE_MAC_SPEED_40G:
872                 speed_bit = HCLGE_SUPPORT_40G_BIT;
873                 break;
874         case HCLGE_MAC_SPEED_50G:
875                 speed_bit = HCLGE_SUPPORT_50G_BIT;
876                 break;
877         case HCLGE_MAC_SPEED_100G:
878                 speed_bit = HCLGE_SUPPORT_100G_BIT;
879                 break;
880         default:
881                 return -EINVAL;
882         }
883
884         if (speed_bit & speed_ability)
885                 return 0;
886
887         return -EINVAL;
888 }
889
890 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
891 {
892         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
893                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
894                                  mac->supported);
895         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
896                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
897                                  mac->supported);
898         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
899                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
900                                  mac->supported);
901         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
902                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
903                                  mac->supported);
904         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
905                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
906                                  mac->supported);
907 }
908
909 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
910 {
911         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
912                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
913                                  mac->supported);
914         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
915                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
916                                  mac->supported);
917         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
918                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
919                                  mac->supported);
920         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
921                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
922                                  mac->supported);
923         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
924                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
925                                  mac->supported);
926 }
927
928 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
929 {
930         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
931                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
932                                  mac->supported);
933         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
934                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
935                                  mac->supported);
936         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
937                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
938                                  mac->supported);
939         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
940                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
941                                  mac->supported);
942         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
943                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
944                                  mac->supported);
945 }
946
947 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
948 {
949         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
950                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
951                                  mac->supported);
952         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
953                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
954                                  mac->supported);
955         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
956                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
957                                  mac->supported);
958         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
959                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
960                                  mac->supported);
961         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
962                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
963                                  mac->supported);
964         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
965                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
966                                  mac->supported);
967 }
968
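/* Advertise the FEC modes matching the current MAC speed: BASE-R for
 * 10G/40G, RS for 25G/50G/100G, and clear the FEC ability otherwise.
 */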
969 static void hclge_convert_setting_fec(struct hclge_mac *mac)
970 {
971         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
972         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
973
974         switch (mac->speed) {
975         case HCLGE_MAC_SPEED_10G:
976         case HCLGE_MAC_SPEED_40G:
977                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
978                                  mac->supported);
979                 mac->fec_ability =
980                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
981                 break;
982         case HCLGE_MAC_SPEED_25G:
983         case HCLGE_MAC_SPEED_50G:
984                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
985                                  mac->supported);
986                 mac->fec_ability =
987                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
988                         BIT(HNAE3_FEC_AUTO);
989                 break;
990         case HCLGE_MAC_SPEED_100G:
991                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
992                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
993                 break;
994         default:
995                 mac->fec_ability = 0;
996                 break;
997         }
998 }
999
1000 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1001                                         u8 speed_ability)
1002 {
1003         struct hclge_mac *mac = &hdev->hw.mac;
1004
1005         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1007                                  mac->supported);
1008
1009         hclge_convert_setting_sr(mac, speed_ability);
1010         hclge_convert_setting_lr(mac, speed_ability);
1011         hclge_convert_setting_cr(mac, speed_ability);
1012         if (hdev->pdev->revision >= 0x21)
1013                 hclge_convert_setting_fec(mac);
1014
1015         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1016         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1018 }
1019
1020 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1021                                             u8 speed_ability)
1022 {
1023         struct hclge_mac *mac = &hdev->hw.mac;
1024
1025         hclge_convert_setting_kr(mac, speed_ability);
1026         if (hdev->pdev->revision >= 0x21)
1027                 hclge_convert_setting_fec(mac);
1028         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1029         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1031 }
1032
1033 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1034                                          u8 speed_ability)
1035 {
1036         unsigned long *supported = hdev->hw.mac.supported;
1037
1038         /* default to supporting all speeds for a GE port */
1039         if (!speed_ability)
1040                 speed_ability = HCLGE_SUPPORT_GE;
1041
1042         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1044                                  supported);
1045
1046         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1047                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1048                                  supported);
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1050                                  supported);
1051         }
1052
1053         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1054                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1056         }
1057
1058         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1059         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1061 }
1062
1063 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1064 {
1065         u8 media_type = hdev->hw.mac.media_type;
1066
1067         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1068                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1069         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1070                 hclge_parse_copper_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1072                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1073 }
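
/* Extract the device configuration (vmdq/tc/tqp numbers, PHY address, media
 * type, RX buffer length, MAC address, default speed, RSS size, NUMA map,
 * speed ability and UMV table space) from the raw HCLGE_OPC_GET_CFG_PARAM
 * descriptors.
 */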
1074 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1075 {
1076         struct hclge_cfg_param_cmd *req;
1077         u64 mac_addr_tmp_high;
1078         u64 mac_addr_tmp;
1079         unsigned int i;
1080
1081         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1082
1083         /* get the configuration */
1084         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1085                                               HCLGE_CFG_VMDQ_M,
1086                                               HCLGE_CFG_VMDQ_S);
1087         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1088                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1089         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                             HCLGE_CFG_TQP_DESC_N_M,
1091                                             HCLGE_CFG_TQP_DESC_N_S);
1092
1093         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1094                                         HCLGE_CFG_PHY_ADDR_M,
1095                                         HCLGE_CFG_PHY_ADDR_S);
1096         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1097                                           HCLGE_CFG_MEDIA_TP_M,
1098                                           HCLGE_CFG_MEDIA_TP_S);
1099         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1100                                           HCLGE_CFG_RX_BUF_LEN_M,
1101                                           HCLGE_CFG_RX_BUF_LEN_S);
1102         /* get mac_address */
1103         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1104         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1105                                             HCLGE_CFG_MAC_ADDR_H_M,
1106                                             HCLGE_CFG_MAC_ADDR_H_S);
1107
1108         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1109
1110         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1111                                              HCLGE_CFG_DEFAULT_SPEED_M,
1112                                              HCLGE_CFG_DEFAULT_SPEED_S);
1113         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1114                                             HCLGE_CFG_RSS_SIZE_M,
1115                                             HCLGE_CFG_RSS_SIZE_S);
1116
1117         for (i = 0; i < ETH_ALEN; i++)
1118                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1119
1120         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1121         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1122
1123         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1124                                              HCLGE_CFG_SPEED_ABILITY_M,
1125                                              HCLGE_CFG_SPEED_ABILITY_S);
1126         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1127                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1128                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1129         if (!cfg->umv_space)
1130                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1131 }
1132
1133 /* hclge_get_cfg: query the static parameters from flash
1134  * @hdev: pointer to struct hclge_dev
1135  * @hcfg: the config structure to be filled
1136  */
1137 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1138 {
1139         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1140         struct hclge_cfg_param_cmd *req;
1141         unsigned int i;
1142         int ret;
1143
1144         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1145                 u32 offset = 0;
1146
1147                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1148                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1149                                            true);
1150                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1151                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1152                 /* the read length is in units of 4 bytes when sent to hardware */
1153                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1154                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1155                 req->offset = cpu_to_le32(offset);
1156         }
1157
1158         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1159         if (ret) {
1160                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1161                 return ret;
1162         }
1163
1164         hclge_parse_cfg(hcfg, desc);
1165
1166         return 0;
1167 }
1168
1169 static int hclge_get_cap(struct hclge_dev *hdev)
1170 {
1171         int ret;
1172
1173         ret = hclge_query_function_status(hdev);
1174         if (ret) {
1175                 dev_err(&hdev->pdev->dev,
1176                         "query function status error %d.\n", ret);
1177                 return ret;
1178         }
1179
1180         /* get pf resource */
1181         ret = hclge_query_pf_resource(hdev);
1182         if (ret)
1183                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1184
1185         return ret;
1186 }
1187
1188 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1189 {
1190 #define HCLGE_MIN_TX_DESC       64
1191 #define HCLGE_MIN_RX_DESC       64
1192
1193         if (!is_kdump_kernel())
1194                 return;
1195
1196         dev_info(&hdev->pdev->dev,
1197                  "Running kdump kernel. Using minimal resources\n");
1198
1199         /* the minimum number of queue pairs equals the number of vports */
1200         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1201         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1202         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1203 }
1204
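/* Read the static configuration and populate the corresponding hclge_dev
 * fields, clamp the TC number to the supported range, and shrink the
 * resources when running in a kdump kernel.
 */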
1205 static int hclge_configure(struct hclge_dev *hdev)
1206 {
1207         struct hclge_cfg cfg;
1208         unsigned int i;
1209         int ret;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "invalid TC num = %d, using 1 TC instead.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Currently, non-contiguous TCs are not supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
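/* Program the minimum and maximum TSO MSS values into the hardware via the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */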
1272 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1273                             unsigned int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
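/* Enable or disable hardware GRO via HCLGE_OPC_GRO_GENERIC_CONFIG; this is
 * a no-op when the device does not support GRO.
 */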
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
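/* Allocate the per-device TQP array and initialise each queue's owner device,
 * index, buffer size, descriptor counts and MMIO base address.
 */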
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
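/* Bind one physical queue (tqp_pid) to a function and a virtual queue id
 * (tqp_vid) via the SET_TQP_MAP command; is_pf selects PF or VF mapping.
 */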
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
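/* Grab up to num_tqps unallocated physical queues for this vport and derive
 * its rss_size from the queues actually obtained per TC.
 */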
1370 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         struct hnae3_handle *nic = &vport->nic;
1467         struct hclge_dev *hdev = vport->back;
1468         int ret;
1469
1470         nic->pdev = hdev->pdev;
1471         nic->ae_algo = &ae_algo;
1472         nic->numa_node_mask = hdev->numa_node_mask;
1473
1474         ret = hclge_knic_setup(vport, num_tqps,
1475                                hdev->num_tx_desc, hdev->num_rx_desc);
1476         if (ret)
1477                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1478
1479         return ret;
1480 }
1481
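/* Allocate one vport for the PF plus one per VMDq/VF instance, spread the
 * TQPs evenly across them (the main vport also takes the remainder) and
 * initialise each vport's defaults and MAC/VLAN lists.
 */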
1482 static int hclge_alloc_vport(struct hclge_dev *hdev)
1483 {
1484         struct pci_dev *pdev = hdev->pdev;
1485         struct hclge_vport *vport;
1486         u32 tqp_main_vport;
1487         u32 tqp_per_vport;
1488         int num_vport, i;
1489         int ret;
1490
1491         /* We need to alloc a vport for main NIC of PF */
1492         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1493
1494         if (hdev->num_tqps < num_vport) {
1495                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1496                         hdev->num_tqps, num_vport);
1497                 return -EINVAL;
1498         }
1499
1500         /* Alloc the same number of TQPs for every vport */
1501         tqp_per_vport = hdev->num_tqps / num_vport;
1502         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1503
1504         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1505                              GFP_KERNEL);
1506         if (!vport)
1507                 return -ENOMEM;
1508
1509         hdev->vport = vport;
1510         hdev->num_alloc_vport = num_vport;
1511
1512         if (IS_ENABLED(CONFIG_PCI_IOV))
1513                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1514
1515         for (i = 0; i < num_vport; i++) {
1516                 vport->back = hdev;
1517                 vport->vport_id = i;
1518                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1519                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1520                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1521                 INIT_LIST_HEAD(&vport->vlan_list);
1522                 INIT_LIST_HEAD(&vport->uc_mac_list);
1523                 INIT_LIST_HEAD(&vport->mc_mac_list);
1524
1525                 if (i == 0)
1526                         ret = hclge_vport_setup(vport, tqp_main_vport);
1527                 else
1528                         ret = hclge_vport_setup(vport, tqp_per_vport);
1529                 if (ret) {
1530                         dev_err(&pdev->dev,
1531                                 "vport setup failed for vport %d, %d\n",
1532                                 i, ret);
1533                         return ret;
1534                 }
1535
1536                 vport++;
1537         }
1538
1539         return 0;
1540 }
1541
1542 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1543                                     struct hclge_pkt_buf_alloc *buf_alloc)
1544 {
1545 /* TX buffer size is allocated in units of 128 bytes */
1546 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1547 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1548         struct hclge_tx_buff_alloc_cmd *req;
1549         struct hclge_desc desc;
1550         int ret;
1551         u8 i;
1552
1553         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1554
1555         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1556         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1557                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1558
1559                 req->tx_pkt_buff[i] =
1560                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1561                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1562         }
1563
1564         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1565         if (ret)
1566                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1567                         ret);
1568
1569         return ret;
1570 }
1571
1572 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1573                                  struct hclge_pkt_buf_alloc *buf_alloc)
1574 {
1575         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1576
1577         if (ret)
1578                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1579
1580         return ret;
1581 }
1582
1583 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1584 {
1585         unsigned int i;
1586         u32 cnt = 0;
1587
1588         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1589                 if (hdev->hw_tc_map & BIT(i))
1590                         cnt++;
1591         return cnt;
1592 }
1593
1594 /* Get the number of pfc enabled TCs, which have private buffer */
1595 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1596                                   struct hclge_pkt_buf_alloc *buf_alloc)
1597 {
1598         struct hclge_priv_buf *priv;
1599         unsigned int i;
1600         int cnt = 0;
1601
1602         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1603                 priv = &buf_alloc->priv_buf[i];
1604                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1605                     priv->enable)
1606                         cnt++;
1607         }
1608
1609         return cnt;
1610 }
1611
1612 /* Get the number of pfc disabled TCs, which have private buffer */
1613 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1614                                      struct hclge_pkt_buf_alloc *buf_alloc)
1615 {
1616         struct hclge_priv_buf *priv;
1617         unsigned int i;
1618         int cnt = 0;
1619
1620         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1621                 priv = &buf_alloc->priv_buf[i];
1622                 if (hdev->hw_tc_map & BIT(i) &&
1623                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1624                     priv->enable)
1625                         cnt++;
1626         }
1627
1628         return cnt;
1629 }
1630
1631 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1632 {
1633         struct hclge_priv_buf *priv;
1634         u32 rx_priv = 0;
1635         int i;
1636
1637         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1638                 priv = &buf_alloc->priv_buf[i];
1639                 if (priv->enable)
1640                         rx_priv += priv->buf_size;
1641         }
1642         return rx_priv;
1643 }
1644
1645 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1646 {
1647         u32 i, total_tx_size = 0;
1648
1649         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1650                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1651
1652         return total_tx_size;
1653 }
1654
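/* Check whether the RX packet buffer remaining after the per-TC private
 * buffers (rx_all) can hold the required shared buffer; if so, fill in the
 * shared buffer size, its high/low waterlines and the per-TC thresholds,
 * taking the DCB/non-DCB dv_buf_size rules into account.
 */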
1655 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1656                                 struct hclge_pkt_buf_alloc *buf_alloc,
1657                                 u32 rx_all)
1658 {
1659         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1660         u32 tc_num = hclge_get_tc_num(hdev);
1661         u32 shared_buf, aligned_mps;
1662         u32 rx_priv;
1663         int i;
1664
1665         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1666
1667         if (hnae3_dev_dcb_supported(hdev))
1668                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1669                                         hdev->dv_buf_size;
1670         else
1671                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1672                                         + hdev->dv_buf_size;
1673
1674         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1675         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1676                              HCLGE_BUF_SIZE_UNIT);
1677
1678         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1679         if (rx_all < rx_priv + shared_std)
1680                 return false;
1681
1682         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1683         buf_alloc->s_buf.buf_size = shared_buf;
1684         if (hnae3_dev_dcb_supported(hdev)) {
1685                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1686                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1687                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1688                                   HCLGE_BUF_SIZE_UNIT);
1689         } else {
1690                 buf_alloc->s_buf.self.high = aligned_mps +
1691                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1692                 buf_alloc->s_buf.self.low = aligned_mps;
1693         }
1694
1695         if (hnae3_dev_dcb_supported(hdev)) {
1696                 if (tc_num)
1697                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1698                 else
1699                         hi_thrd = shared_buf - hdev->dv_buf_size;
1700
1701                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1702                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1703                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1704         } else {
1705                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1706                 lo_thrd = aligned_mps;
1707         }
1708
1709         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1710                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1711                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1712         }
1713
1714         return true;
1715 }
1716
1717 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1718                                 struct hclge_pkt_buf_alloc *buf_alloc)
1719 {
1720         u32 i, total_size;
1721
1722         total_size = hdev->pkt_buf_size;
1723
1724         /* alloc tx buffer for all enabled tc */
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1727
1728                 if (hdev->hw_tc_map & BIT(i)) {
1729                         if (total_size < hdev->tx_buf_size)
1730                                 return -ENOMEM;
1731
1732                         priv->tx_buf_size = hdev->tx_buf_size;
1733                 } else {
1734                         priv->tx_buf_size = 0;
1735                 }
1736
1737                 total_size -= priv->tx_buf_size;
1738         }
1739
1740         return 0;
1741 }
1742
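/* Give every enabled TC a private RX buffer: PFC-enabled TCs get both low and
 * high waterlines, the others only a high one; @max selects generous or
 * minimal waterlines. Returns whether the result still fits in rx_all.
 */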
1743 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1744                                   struct hclge_pkt_buf_alloc *buf_alloc)
1745 {
1746         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1747         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1748         unsigned int i;
1749
1750         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1751                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1752
1753                 priv->enable = 0;
1754                 priv->wl.low = 0;
1755                 priv->wl.high = 0;
1756                 priv->buf_size = 0;
1757
1758                 if (!(hdev->hw_tc_map & BIT(i)))
1759                         continue;
1760
1761                 priv->enable = 1;
1762
1763                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1764                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1765                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1766                                                 HCLGE_BUF_SIZE_UNIT);
1767                 } else {
1768                         priv->wl.low = 0;
1769                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1770                                         aligned_mps;
1771                 }
1772
1773                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1774         }
1775
1776         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1777 }
1778
1779 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1780                                           struct hclge_pkt_buf_alloc *buf_alloc)
1781 {
1782         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1783         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1784         int i;
1785
1786         /* clear the TCs' private buffers starting from the last one */
1787         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1788                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1789                 unsigned int mask = BIT((unsigned int)i);
1790
1791                 if (hdev->hw_tc_map & mask &&
1792                     !(hdev->tm_info.hw_pfc_map & mask)) {
1793                         /* Clear the no pfc TC private buffer */
1794                         priv->wl.low = 0;
1795                         priv->wl.high = 0;
1796                         priv->buf_size = 0;
1797                         priv->enable = 0;
1798                         no_pfc_priv_num--;
1799                 }
1800
1801                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1802                     no_pfc_priv_num == 0)
1803                         break;
1804         }
1805
1806         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1807 }
1808
1809 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1810                                         struct hclge_pkt_buf_alloc *buf_alloc)
1811 {
1812         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1813         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1814         int i;
1815
1816         /* clear the TCs' private buffers starting from the last one */
1817         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1818                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1819                 unsigned int mask = BIT((unsigned int)i);
1820
1821                 if (hdev->hw_tc_map & mask &&
1822                     hdev->tm_info.hw_pfc_map & mask) {
1823                         /* Reduce the number of pfc TC with private buffer */
1824                         priv->wl.low = 0;
1825                         priv->enable = 0;
1826                         priv->wl.high = 0;
1827                         priv->buf_size = 0;
1828                         pfc_priv_num--;
1829                 }
1830
1831                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1832                     pfc_priv_num == 0)
1833                         break;
1834         }
1835
1836         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1837 }
1838
1839 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1840  * @hdev: pointer to struct hclge_dev
1841  * @buf_alloc: pointer to buffer calculation data
1842  * @return: 0: calculation successful, negative: fail
1843  */
1844 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1845                                 struct hclge_pkt_buf_alloc *buf_alloc)
1846 {
1847         /* When DCB is not supported, rx private buffer is not allocated. */
1848         if (!hnae3_dev_dcb_supported(hdev)) {
1849                 u32 rx_all = hdev->pkt_buf_size;
1850
1851                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1852                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1853                         return -ENOMEM;
1854
1855                 return 0;
1856         }
1857
1858         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1859                 return 0;
1860
1861         /* try to decrease the buffer size */
1862         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1863                 return 0;
1864
1865         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1866                 return 0;
1867
1868         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1869                 return 0;
1870
1871         return -ENOMEM;
1872 }
1873
1874 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1875                                    struct hclge_pkt_buf_alloc *buf_alloc)
1876 {
1877         struct hclge_rx_priv_buff_cmd *req;
1878         struct hclge_desc desc;
1879         int ret;
1880         int i;
1881
1882         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1883         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1884
1885         /* Alloc private buffer TCs */
1886         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1887                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1888
1889                 req->buf_num[i] =
1890                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1891                 req->buf_num[i] |=
1892                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1893         }
1894
1895         req->shared_buf =
1896                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1897                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1898
1899         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1900         if (ret)
1901                 dev_err(&hdev->pdev->dev,
1902                         "rx private buffer alloc cmd failed %d\n", ret);
1903
1904         return ret;
1905 }
1906
1907 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1908                                    struct hclge_pkt_buf_alloc *buf_alloc)
1909 {
1910         struct hclge_rx_priv_wl_buf *req;
1911         struct hclge_priv_buf *priv;
1912         struct hclge_desc desc[2];
1913         int i, j;
1914         int ret;
1915
1916         for (i = 0; i < 2; i++) {
1917                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1918                                            false);
1919                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1920
1921                 /* The first descriptor sets the NEXT bit to 1 */
1922                 if (i == 0)
1923                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1924                 else
1925                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1926
1927                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1928                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1929
1930                         priv = &buf_alloc->priv_buf[idx];
1931                         req->tc_wl[j].high =
1932                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1933                         req->tc_wl[j].high |=
1934                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1935                         req->tc_wl[j].low =
1936                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1937                         req->tc_wl[j].low |=
1938                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1939                 }
1940         }
1941
1942         /* Send 2 descriptors at one time */
1943         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1944         if (ret)
1945                 dev_err(&hdev->pdev->dev,
1946                         "rx private waterline config cmd failed %d\n",
1947                         ret);
1948         return ret;
1949 }
1950
1951 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1952                                     struct hclge_pkt_buf_alloc *buf_alloc)
1953 {
1954         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1955         struct hclge_rx_com_thrd *req;
1956         struct hclge_desc desc[2];
1957         struct hclge_tc_thrd *tc;
1958         int i, j;
1959         int ret;
1960
1961         for (i = 0; i < 2; i++) {
1962                 hclge_cmd_setup_basic_desc(&desc[i],
1963                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1964                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1965
1966                 /* The first descriptor sets the NEXT bit to 1 */
1967                 if (i == 0)
1968                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1969                 else
1970                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1971
1972                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1973                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1974
1975                         req->com_thrd[j].high =
1976                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1977                         req->com_thrd[j].high |=
1978                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1979                         req->com_thrd[j].low =
1980                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1981                         req->com_thrd[j].low |=
1982                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1983                 }
1984         }
1985
1986         /* Send 2 descriptors at one time */
1987         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1988         if (ret)
1989                 dev_err(&hdev->pdev->dev,
1990                         "common threshold config cmd failed %d\n", ret);
1991         return ret;
1992 }
1993
1994 static int hclge_common_wl_config(struct hclge_dev *hdev,
1995                                   struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1998         struct hclge_rx_com_wl *req;
1999         struct hclge_desc desc;
2000         int ret;
2001
2002         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2003
2004         req = (struct hclge_rx_com_wl *)desc.data;
2005         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2006         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2007
2008         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2009         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2010
2011         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2012         if (ret)
2013                 dev_err(&hdev->pdev->dev,
2014                         "common waterline config cmd failed %d\n", ret);
2015
2016         return ret;
2017 }
2018
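/* Calculate and program the whole packet buffer layout: TX buffers per TC,
 * RX private buffers, and (for DCB capable devices) the private waterlines
 * and shared thresholds, finishing with the common waterline setup.
 */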
2019 int hclge_buffer_alloc(struct hclge_dev *hdev)
2020 {
2021         struct hclge_pkt_buf_alloc *pkt_buf;
2022         int ret;
2023
2024         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2025         if (!pkt_buf)
2026                 return -ENOMEM;
2027
2028         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2029         if (ret) {
2030                 dev_err(&hdev->pdev->dev,
2031                         "could not calc tx buffer size for all TCs %d\n", ret);
2032                 goto out;
2033         }
2034
2035         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2036         if (ret) {
2037                 dev_err(&hdev->pdev->dev,
2038                         "could not alloc tx buffers %d\n", ret);
2039                 goto out;
2040         }
2041
2042         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2043         if (ret) {
2044                 dev_err(&hdev->pdev->dev,
2045                         "could not calc rx priv buffer size for all TCs %d\n",
2046                         ret);
2047                 goto out;
2048         }
2049
2050         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2051         if (ret) {
2052                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2053                         ret);
2054                 goto out;
2055         }
2056
2057         if (hnae3_dev_dcb_supported(hdev)) {
2058                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2059                 if (ret) {
2060                         dev_err(&hdev->pdev->dev,
2061                                 "could not configure rx private waterline %d\n",
2062                                 ret);
2063                         goto out;
2064                 }
2065
2066                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2067                 if (ret) {
2068                         dev_err(&hdev->pdev->dev,
2069                                 "could not configure common threshold %d\n",
2070                                 ret);
2071                         goto out;
2072                 }
2073         }
2074
2075         ret = hclge_common_wl_config(hdev, pkt_buf);
2076         if (ret)
2077                 dev_err(&hdev->pdev->dev,
2078                         "could not configure common waterline %d\n", ret);
2079
2080 out:
2081         kfree(pkt_buf);
2082         return ret;
2083 }
2084
2085 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2086 {
2087         struct hnae3_handle *roce = &vport->roce;
2088         struct hnae3_handle *nic = &vport->nic;
2089
2090         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2091
2092         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2093             vport->back->num_msi_left == 0)
2094                 return -EINVAL;
2095
2096         roce->rinfo.base_vector = vport->back->roce_base_vector;
2097
2098         roce->rinfo.netdev = nic->kinfo.netdev;
2099         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2100
2101         roce->pdev = nic->pdev;
2102         roce->ae_algo = nic->ae_algo;
2103         roce->numa_node_mask = nic->numa_node_mask;
2104
2105         return 0;
2106 }
2107
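/* Allocate MSI/MSI-X vectors for the PF and the bookkeeping arrays that track
 * which vport owns each vector and the mapped IRQ numbers.
 */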
2108 static int hclge_init_msi(struct hclge_dev *hdev)
2109 {
2110         struct pci_dev *pdev = hdev->pdev;
2111         int vectors;
2112         int i;
2113
2114         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2115                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2116         if (vectors < 0) {
2117                 dev_err(&pdev->dev,
2118                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2119                         vectors);
2120                 return vectors;
2121         }
2122         if (vectors < hdev->num_msi)
2123                 dev_warn(&hdev->pdev->dev,
2124                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2125                          hdev->num_msi, vectors);
2126
2127         hdev->num_msi = vectors;
2128         hdev->num_msi_left = vectors;
2129         hdev->base_msi_vector = pdev->irq;
2130         hdev->roce_base_vector = hdev->base_msi_vector +
2131                                 hdev->roce_base_msix_offset;
2132
2133         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2134                                            sizeof(u16), GFP_KERNEL);
2135         if (!hdev->vector_status) {
2136                 pci_free_irq_vectors(pdev);
2137                 return -ENOMEM;
2138         }
2139
2140         for (i = 0; i < hdev->num_msi; i++)
2141                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2142
2143         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2144                                         sizeof(int), GFP_KERNEL);
2145         if (!hdev->vector_irq) {
2146                 pci_free_irq_vectors(pdev);
2147                 return -ENOMEM;
2148         }
2149
2150         return 0;
2151 }
2152
2153 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2154 {
2155         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2156                 duplex = HCLGE_MAC_FULL;
2157
2158         return duplex;
2159 }
2160
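/* Translate the requested speed into the firmware speed encoding and send the
 * MAC speed/duplex config command.
 */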
2161 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2162                                       u8 duplex)
2163 {
2164         struct hclge_config_mac_speed_dup_cmd *req;
2165         struct hclge_desc desc;
2166         int ret;
2167
2168         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2169
2170         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2171
2172         if (duplex)
2173                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2174
2175         switch (speed) {
2176         case HCLGE_MAC_SPEED_10M:
2177                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2178                                 HCLGE_CFG_SPEED_S, 6);
2179                 break;
2180         case HCLGE_MAC_SPEED_100M:
2181                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2182                                 HCLGE_CFG_SPEED_S, 7);
2183                 break;
2184         case HCLGE_MAC_SPEED_1G:
2185                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2186                                 HCLGE_CFG_SPEED_S, 0);
2187                 break;
2188         case HCLGE_MAC_SPEED_10G:
2189                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2190                                 HCLGE_CFG_SPEED_S, 1);
2191                 break;
2192         case HCLGE_MAC_SPEED_25G:
2193                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2194                                 HCLGE_CFG_SPEED_S, 2);
2195                 break;
2196         case HCLGE_MAC_SPEED_40G:
2197                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2198                                 HCLGE_CFG_SPEED_S, 3);
2199                 break;
2200         case HCLGE_MAC_SPEED_50G:
2201                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2202                                 HCLGE_CFG_SPEED_S, 4);
2203                 break;
2204         case HCLGE_MAC_SPEED_100G:
2205                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2206                                 HCLGE_CFG_SPEED_S, 5);
2207                 break;
2208         default:
2209                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2210                 return -EINVAL;
2211         }
2212
2213         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2214                       1);
2215
2216         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2217         if (ret) {
2218                 dev_err(&hdev->pdev->dev,
2219                         "mac speed/duplex config cmd failed %d.\n", ret);
2220                 return ret;
2221         }
2222
2223         return 0;
2224 }
2225
2226 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2227 {
2228         int ret;
2229
2230         duplex = hclge_check_speed_dup(duplex, speed);
2231         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2232                 return 0;
2233
2234         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2235         if (ret)
2236                 return ret;
2237
2238         hdev->hw.mac.speed = speed;
2239         hdev->hw.mac.duplex = duplex;
2240
2241         return 0;
2242 }
2243
2244 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2245                                      u8 duplex)
2246 {
2247         struct hclge_vport *vport = hclge_get_vport(handle);
2248         struct hclge_dev *hdev = vport->back;
2249
2250         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2251 }
2252
2253 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2254 {
2255         struct hclge_config_auto_neg_cmd *req;
2256         struct hclge_desc desc;
2257         u32 flag = 0;
2258         int ret;
2259
2260         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2261
2262         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2263         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2264         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2265
2266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2267         if (ret)
2268                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2269                         ret);
2270
2271         return ret;
2272 }
2273
2274 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2275 {
2276         struct hclge_vport *vport = hclge_get_vport(handle);
2277         struct hclge_dev *hdev = vport->back;
2278
2279         if (!hdev->hw.mac.support_autoneg) {
2280                 if (enable) {
2281                         dev_err(&hdev->pdev->dev,
2282                                 "autoneg is not supported by current port\n");
2283                         return -EOPNOTSUPP;
2284                 } else {
2285                         return 0;
2286                 }
2287         }
2288
2289         return hclge_set_autoneg_en(hdev, enable);
2290 }
2291
2292 static int hclge_get_autoneg(struct hnae3_handle *handle)
2293 {
2294         struct hclge_vport *vport = hclge_get_vport(handle);
2295         struct hclge_dev *hdev = vport->back;
2296         struct phy_device *phydev = hdev->hw.mac.phydev;
2297
2298         if (phydev)
2299                 return phydev->autoneg;
2300
2301         return hdev->hw.mac.autoneg;
2302 }
2303
2304 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2305 {
2306         struct hclge_vport *vport = hclge_get_vport(handle);
2307         struct hclge_dev *hdev = vport->back;
2308         int ret;
2309
2310         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2311
2312         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2313         if (ret)
2314                 return ret;
2315         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2316 }
2317
2318 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2319 {
2320         struct hclge_config_fec_cmd *req;
2321         struct hclge_desc desc;
2322         int ret;
2323
2324         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2325
2326         req = (struct hclge_config_fec_cmd *)desc.data;
2327         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2328                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2329         if (fec_mode & BIT(HNAE3_FEC_RS))
2330                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2331                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2332         if (fec_mode & BIT(HNAE3_FEC_BASER))
2333                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2334                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2335
2336         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2337         if (ret)
2338                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2339
2340         return ret;
2341 }
2342
2343 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2344 {
2345         struct hclge_vport *vport = hclge_get_vport(handle);
2346         struct hclge_dev *hdev = vport->back;
2347         struct hclge_mac *mac = &hdev->hw.mac;
2348         int ret;
2349
2350         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2351                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2352                 return -EINVAL;
2353         }
2354
2355         ret = hclge_set_fec_hw(hdev, fec_mode);
2356         if (ret)
2357                 return ret;
2358
2359         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2360         return 0;
2361 }
2362
2363 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2364                           u8 *fec_mode)
2365 {
2366         struct hclge_vport *vport = hclge_get_vport(handle);
2367         struct hclge_dev *hdev = vport->back;
2368         struct hclge_mac *mac = &hdev->hw.mac;
2369
2370         if (fec_ability)
2371                 *fec_ability = mac->fec_ability;
2372         if (fec_mode)
2373                 *fec_mode = mac->fec_mode;
2374 }
2375
2376 static int hclge_mac_init(struct hclge_dev *hdev)
2377 {
2378         struct hclge_mac *mac = &hdev->hw.mac;
2379         int ret;
2380
2381         hdev->support_sfp_query = true;
2382         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2383         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2384                                          hdev->hw.mac.duplex);
2385         if (ret) {
2386                 dev_err(&hdev->pdev->dev,
2387                         "Config mac speed dup fail ret=%d\n", ret);
2388                 return ret;
2389         }
2390
2391         mac->link = 0;
2392
2393         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2394                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2395                 if (ret) {
2396                         dev_err(&hdev->pdev->dev,
2397                                 "Fec mode init fail, ret = %d\n", ret);
2398                         return ret;
2399                 }
2400         }
2401
2402         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2403         if (ret) {
2404                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2405                 return ret;
2406         }
2407
2408         ret = hclge_buffer_alloc(hdev);
2409         if (ret)
2410                 dev_err(&hdev->pdev->dev,
2411                         "allocate buffer fail, ret=%d\n", ret);
2412
2413         return ret;
2414 }
2415
2416 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2417 {
2418         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2419             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2420                 schedule_work(&hdev->mbx_service_task);
2421 }
2422
2423 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2424 {
2425         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2426             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2427                 schedule_work(&hdev->rst_service_task);
2428 }
2429
2430 static void hclge_task_schedule(struct hclge_dev *hdev)
2431 {
2432         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2433             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2434             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2435                 (void)schedule_work(&hdev->service_task);
2436 }
2437
2438 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2439 {
2440         struct hclge_link_status_cmd *req;
2441         struct hclge_desc desc;
2442         int link_status;
2443         int ret;
2444
2445         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2446         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2447         if (ret) {
2448                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2449                         ret);
2450                 return ret;
2451         }
2452
2453         req = (struct hclge_link_status_cmd *)desc.data;
2454         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2455
2456         return !!link_status;
2457 }
2458
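/* Report the effective link state: 0 while the device is down, otherwise the
 * MAC link status combined with the PHY state when a PHY is attached.
 */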
2459 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2460 {
2461         unsigned int mac_state;
2462         int link_stat;
2463
2464         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2465                 return 0;
2466
2467         mac_state = hclge_get_mac_link_status(hdev);
2468
2469         if (hdev->hw.mac.phydev) {
2470                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2471                         link_stat = mac_state &
2472                                 hdev->hw.mac.phydev->link;
2473                 else
2474                         link_stat = 0;
2475
2476         } else {
2477                 link_stat = mac_state;
2478         }
2479
2480         return !!link_stat;
2481 }
2482
2483 static void hclge_update_link_status(struct hclge_dev *hdev)
2484 {
2485         struct hnae3_client *rclient = hdev->roce_client;
2486         struct hnae3_client *client = hdev->nic_client;
2487         struct hnae3_handle *rhandle;
2488         struct hnae3_handle *handle;
2489         int state;
2490         int i;
2491
2492         if (!client)
2493                 return;
2494         state = hclge_get_mac_phy_link(hdev);
2495         if (state != hdev->hw.mac.link) {
2496                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2497                         handle = &hdev->vport[i].nic;
2498                         client->ops->link_status_change(handle, state);
2499                         hclge_config_mac_tnl_int(hdev, state);
2500                         rhandle = &hdev->vport[i].roce;
2501                         if (rclient && rclient->ops->link_status_change)
2502                                 rclient->ops->link_status_change(rhandle,
2503                                                                  state);
2504                 }
2505                 hdev->hw.mac.link = state;
2506         }
2507 }
2508
2509 static void hclge_update_port_capability(struct hclge_mac *mac)
2510 {
2511         /* update fec ability by speed */
2512         hclge_convert_setting_fec(mac);
2513
2514         /* firmware can not identify backplane type, the media type
2515          * read from configuration can help to deal with it
2516          */
2517         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2518             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2519                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2520         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2521                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2522
2523         if (mac->support_autoneg) {
2524                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2525                 linkmode_copy(mac->advertising, mac->supported);
2526         } else {
2527                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2528                                    mac->supported);
2529                 linkmode_zero(mac->advertising);
2530         }
2531 }
2532
2533 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2534 {
2535         struct hclge_sfp_info_cmd *resp;
2536         struct hclge_desc desc;
2537         int ret;
2538
2539         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2540         resp = (struct hclge_sfp_info_cmd *)desc.data;
2541         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2542         if (ret == -EOPNOTSUPP) {
2543                 dev_warn(&hdev->pdev->dev,
2544                          "IMP does not support getting SFP speed %d\n", ret);
2545                 return ret;
2546         } else if (ret) {
2547                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2548                 return ret;
2549         }
2550
2551         *speed = le32_to_cpu(resp->speed);
2552
2553         return 0;
2554 }
2555
2556 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2557 {
2558         struct hclge_sfp_info_cmd *resp;
2559         struct hclge_desc desc;
2560         int ret;
2561
2562         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2563         resp = (struct hclge_sfp_info_cmd *)desc.data;
2564
2565         resp->query_type = QUERY_ACTIVE_SPEED;
2566
2567         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568         if (ret == -EOPNOTSUPP) {
2569                 dev_warn(&hdev->pdev->dev,
2570                          "IMP does not support getting SFP info %d\n", ret);
2571                 return ret;
2572         } else if (ret) {
2573                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2574                 return ret;
2575         }
2576
2577         mac->speed = le32_to_cpu(resp->speed);
2578         /* if resp->speed_ability is 0, it means it's an old firmware
2579          * version, do not update these params
2580          */
2581         if (resp->speed_ability) {
2582                 mac->module_type = le32_to_cpu(resp->module_type);
2583                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2584                 mac->autoneg = resp->autoneg;
2585                 mac->support_autoneg = resp->autoneg_ability;
2586                 if (!resp->active_fec)
2587                         mac->fec_mode = 0;
2588                 else
2589                         mac->fec_mode = BIT(resp->active_fec);
2590         } else {
2591                 mac->speed_type = QUERY_SFP_SPEED;
2592         }
2593
2594         return 0;
2595 }
2596
2597 static int hclge_update_port_info(struct hclge_dev *hdev)
2598 {
2599         struct hclge_mac *mac = &hdev->hw.mac;
2600         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2601         int ret;
2602
2603         /* get the port info from SFP cmd if not copper port */
2604         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2605                 return 0;
2606
2607         /* if IMP does not support getting SFP/qSFP info, return directly */
2608         if (!hdev->support_sfp_query)
2609                 return 0;
2610
2611         if (hdev->pdev->revision >= 0x21)
2612                 ret = hclge_get_sfp_info(hdev, mac);
2613         else
2614                 ret = hclge_get_sfp_speed(hdev, &speed);
2615
2616         if (ret == -EOPNOTSUPP) {
2617                 hdev->support_sfp_query = false;
2618                 return ret;
2619         } else if (ret) {
2620                 return ret;
2621         }
2622
2623         if (hdev->pdev->revision >= 0x21) {
2624                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2625                         hclge_update_port_capability(mac);
2626                         return 0;
2627                 }
2628                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2629                                                HCLGE_MAC_FULL);
2630         } else {
2631                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2632                         return 0; /* do nothing if no SFP */
2633
2634                 /* must config full duplex for SFP */
2635                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2636         }
2637 }
2638
2639 static int hclge_get_status(struct hnae3_handle *handle)
2640 {
2641         struct hclge_vport *vport = hclge_get_vport(handle);
2642         struct hclge_dev *hdev = vport->back;
2643
2644         hclge_update_link_status(hdev);
2645
2646         return hdev->hw.mac.link;
2647 }
2648
2649 static void hclge_service_timer(struct timer_list *t)
2650 {
2651         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2652
2653         mod_timer(&hdev->service_timer, jiffies + HZ);
2654         hdev->hw_stats.stats_timer++;
2655         hdev->fd_arfs_expire_timer++;
2656         hclge_task_schedule(hdev);
2657 }
2658
2659 static void hclge_service_complete(struct hclge_dev *hdev)
2660 {
2661         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2662
2663         /* Flush memory before next watchdog */
2664         smp_mb__before_atomic();
2665         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2666 }
2667
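/* Inspect the vector0 status registers and classify the pending event as an
 * IMP/global reset, an MSI-X reported error or a mailbox interrupt; *clearval
 * receives the bits the caller should write back to clear the cause.
 */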
2668 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2669 {
2670         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2671
2672         /* fetch the events from their corresponding regs */
2673         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2674         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2675         msix_src_reg = hclge_read_dev(&hdev->hw,
2676                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2677
2678         /* Assumption: If by any chance reset and mailbox events are reported
2679          * together then we will only process the reset event in this pass and
2680          * will defer the processing of the mailbox events. Since we would not
2681          * have cleared the RX CMDQ event this time, we would receive another
2682          * interrupt from H/W just for the mailbox.
2683          */
2684
2685         /* check for vector0 reset event sources */
2686         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2687                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2688                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2689                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2691                 hdev->rst_stats.imp_rst_cnt++;
2692                 return HCLGE_VECTOR0_EVENT_RST;
2693         }
2694
2695         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2696                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2697                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2698                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2699                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2700                 hdev->rst_stats.global_rst_cnt++;
2701                 return HCLGE_VECTOR0_EVENT_RST;
2702         }
2703
2704         /* check for vector0 msix event source */
2705         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2706                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2707                         msix_src_reg);
2708                 return HCLGE_VECTOR0_EVENT_ERR;
2709         }
2710
2711         /* check for vector0 mailbox(=CMDQ RX) event source */
2712         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2713                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2714                 *clearval = cmdq_src_reg;
2715                 return HCLGE_VECTOR0_EVENT_MBX;
2716         }
2717
2718         /* print other vector0 event source */
2719         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2720                 cmdq_src_reg, msix_src_reg);
2721         return HCLGE_VECTOR0_EVENT_OTHER;
2722 }
2723
2724 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2725                                     u32 regclr)
2726 {
2727         switch (event_type) {
2728         case HCLGE_VECTOR0_EVENT_RST:
2729                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2730                 break;
2731         case HCLGE_VECTOR0_EVENT_MBX:
2732                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2733                 break;
2734         default:
2735                 break;
2736         }
2737 }
2738
2739 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2740 {
2741         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2742                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2743                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2744                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2745         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2746 }
2747
2748 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2749 {
2750         writel(enable ? 1 : 0, vector->addr);
2751 }
2752
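/* Misc (vector0) interrupt handler: mask the vector, classify the event,
 * schedule the reset or mailbox task accordingly, and re-enable the vector
 * only for mailbox events (the reset path re-enables it later).
 */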
2753 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2754 {
2755         struct hclge_dev *hdev = data;
2756         u32 clearval = 0;
2757         u32 event_cause;
2758
2759         hclge_enable_vector(&hdev->misc_vector, false);
2760         event_cause = hclge_check_event_cause(hdev, &clearval);
2761
2762         /* vector 0 interrupt is shared with reset and mailbox source events. */
2763         switch (event_cause) {
2764         case HCLGE_VECTOR0_EVENT_ERR:
2765                 /* We do not know what type of reset is required now. This can
2766                  * only be decided after we fetch the type of errors which
2767                  * caused this event. Therefore, for now we do the following:
2768                  * 1. Assert HNAE3_UNKNOWN_RESET, which means the actual type
2769                  *    of reset to be used is deferred.
2770                  * 2. Schedule the reset service task.
2771                  * 3. When the service task sees HNAE3_UNKNOWN_RESET, it will
2772                  *    fetch the correct type of reset, by first decoding the
2773                  *    types of errors that caused it.
2774                  */
2775                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2776                 /* fall through */
2777         case HCLGE_VECTOR0_EVENT_RST:
2778                 hclge_reset_task_schedule(hdev);
2779                 break;
2780         case HCLGE_VECTOR0_EVENT_MBX:
2781                 /* If we are here then either:
2782                  * 1. we are not handling any mbx task and none is
2783                  *    scheduled,
2784                  *                        OR
2785                  * 2. we are handling a mbx task but nothing more is
2786                  *    scheduled.
2787                  * In both cases we should schedule the mbx task, as there are
2788                  * more mbx messages reported by this interrupt.
2789                  */
2790                 hclge_mbx_task_schedule(hdev);
2791                 break;
2792         default:
2793                 dev_warn(&hdev->pdev->dev,
2794                          "received unknown or unhandled event of vector0\n");
2795                 break;
2796         }
2797
2798         /* clear the source of interrupt if it is not caused by reset */
2799         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2800                 hclge_clear_event_cause(hdev, event_cause, clearval);
2801                 hclge_enable_vector(&hdev->misc_vector, true);
2802         }
2803
2804         return IRQ_HANDLED;
2805 }
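
/* Re-enable policy for the misc vector in the handler above: the vector is
 * masked on entry and only unmasked here for mailbox events. For reset and
 * MSI-X error events it stays masked and is unmasked again from task context
 * once the cause has been dealt with (see hclge_clear_reset_cause() and
 * hclge_get_reset_level()).
 */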
2806
2807 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2808 {
2809         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2810                 dev_warn(&hdev->pdev->dev,
2811                          "vector(vector_id %d) has been freed.\n", vector_id);
2812                 return;
2813         }
2814
2815         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2816         hdev->num_msi_left += 1;
2817         hdev->num_msi_used -= 1;
2818 }
2819
2820 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2821 {
2822         struct hclge_misc_vector *vector = &hdev->misc_vector;
2823
2824         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2825
2826         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2827         hdev->vector_status[0] = 0;
2828
2829         hdev->num_msi_left -= 1;
2830         hdev->num_msi_used += 1;
2831 }
2832
2833 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2834 {
2835         int ret;
2836
2837         hclge_get_misc_vector(hdev);
2838
2839         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
2840         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2841                           0, "hclge_misc", hdev);
2842         if (ret) {
2843                 hclge_free_vector(hdev, 0);
2844                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2845                         hdev->misc_vector.vector_irq);
2846         }
2847
2848         return ret;
2849 }
2850
2851 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2852 {
2853         free_irq(hdev->misc_vector.vector_irq, hdev);
2854         hclge_free_vector(hdev, 0);
2855 }
2856
2857 int hclge_notify_client(struct hclge_dev *hdev,
2858                         enum hnae3_reset_notify_type type)
2859 {
2860         struct hnae3_client *client = hdev->nic_client;
2861         u16 i;
2862
2863         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2864                 return 0;
2865
2866         if (!client->ops->reset_notify)
2867                 return -EOPNOTSUPP;
2868
2869         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2870                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2871                 int ret;
2872
2873                 ret = client->ops->reset_notify(handle, type);
2874                 if (ret) {
2875                         dev_err(&hdev->pdev->dev,
2876                                 "notify nic client failed %d(%d)\n", type, ret);
2877                         return ret;
2878                 }
2879         }
2880
2881         return 0;
2882 }
2883
2884 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2885                                     enum hnae3_reset_notify_type type)
2886 {
2887         struct hnae3_client *client = hdev->roce_client;
2888         int ret = 0;
2889         u16 i;
2890
2891         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2892                 return 0;
2893
2894         if (!client->ops->reset_notify)
2895                 return -EOPNOTSUPP;
2896
2897         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2898                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2899
2900                 ret = client->ops->reset_notify(handle, type);
2901                 if (ret) {
2902                         dev_err(&hdev->pdev->dev,
2903                                 "notify roce client failed %d(%d)",
2904                                 type, ret);
2905                         return ret;
2906                 }
2907         }
2908
2909         return ret;
2910 }
2911
2912 static int hclge_reset_wait(struct hclge_dev *hdev)
2913 {
2914 #define HCLGE_RESET_WATI_MS     100
2915 #define HCLGE_RESET_WAIT_CNT    200
2916         u32 val, reg, reg_bit;
2917         u32 cnt = 0;
2918
2919         switch (hdev->reset_type) {
2920         case HNAE3_IMP_RESET:
2921                 reg = HCLGE_GLOBAL_RESET_REG;
2922                 reg_bit = HCLGE_IMP_RESET_BIT;
2923                 break;
2924         case HNAE3_GLOBAL_RESET:
2925                 reg = HCLGE_GLOBAL_RESET_REG;
2926                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2927                 break;
2928         case HNAE3_FUNC_RESET:
2929                 reg = HCLGE_FUN_RST_ING;
2930                 reg_bit = HCLGE_FUN_RST_ING_B;
2931                 break;
2932         case HNAE3_FLR_RESET:
2933                 break;
2934         default:
2935                 dev_err(&hdev->pdev->dev,
2936                         "Wait for unsupported reset type: %d\n",
2937                         hdev->reset_type);
2938                 return -EINVAL;
2939         }
2940
2941         if (hdev->reset_type == HNAE3_FLR_RESET) {
2942                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2943                        cnt++ < HCLGE_RESET_WAIT_CNT)
2944                         msleep(HCLGE_RESET_WATI_MS);
2945
2946                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2947                         dev_err(&hdev->pdev->dev,
2948                                 "flr wait timeout: %d\n", cnt);
2949                         return -EBUSY;
2950                 }
2951
2952                 return 0;
2953         }
2954
2955         val = hclge_read_dev(&hdev->hw, reg);
2956         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2957                 msleep(HCLGE_RESET_WATI_MS);
2958                 val = hclge_read_dev(&hdev->hw, reg);
2959                 cnt++;
2960         }
2961
2962         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2963                 dev_warn(&hdev->pdev->dev,
2964                          "Wait for reset timeout: %d\n", hdev->reset_type);
2965                 return -EBUSY;
2966         }
2967
2968         return 0;
2969 }
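
/* Purely illustrative sketch, not part of the driver: the 100 ms polling loop
 * above could also be written with readl_poll_timeout() from <linux/iopoll.h>
 * (which this file does not currently include), assuming the status register
 * is reachable at hdev->hw.io_base + reg. The sketch polls more often (every
 * 1 ms) but keeps the same overall 20 second budget as
 * HCLGE_RESET_WATI_MS * HCLGE_RESET_WAIT_CNT.
 */
#if 0
static int hclge_reset_wait_poll(struct hclge_dev *hdev, u32 reg, u32 reg_bit)
{
        void __iomem *addr = hdev->hw.io_base + reg;
        u32 val;

        /* wait for the reset bit to clear: sleep 1 ms between reads,
         * give up after 20 s
         */
        return readl_poll_timeout(addr, val, !(val & BIT(reg_bit)),
                                  1000, 20 * USEC_PER_SEC);
}
#endif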
2970
2971 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2972 {
2973         struct hclge_vf_rst_cmd *req;
2974         struct hclge_desc desc;
2975
2976         req = (struct hclge_vf_rst_cmd *)desc.data;
2977         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2978         req->dest_vfid = func_id;
2979
2980         if (reset)
2981                 req->vf_rst = 0x1;
2982
2983         return hclge_cmd_send(&hdev->hw, &desc, 1);
2984 }
2985
2986 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2987 {
2988         int i;
2989
2990         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2991                 struct hclge_vport *vport = &hdev->vport[i];
2992                 int ret;
2993
2994                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2995                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2996                 if (ret) {
2997                         dev_err(&hdev->pdev->dev,
2998                                 "set vf(%d) rst failed %d!\n",
2999                                 vport->vport_id, ret);
3000                         return ret;
3001                 }
3002
3003                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3004                         continue;
3005
3006                 /* Inform VF to process the reset.
3007                  * hclge_inform_reset_assert_to_vf may fail if VF
3008                  * driver is not loaded.
3009                  */
3010                 ret = hclge_inform_reset_assert_to_vf(vport);
3011                 if (ret)
3012                         dev_warn(&hdev->pdev->dev,
3013                                  "inform reset to vf(%d) failed %d!\n",
3014                                  vport->vport_id, ret);
3015         }
3016
3017         return 0;
3018 }
3019
3020 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3021 {
3022         struct hclge_desc desc;
3023         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3024         int ret;
3025
3026         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3027         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3028         req->fun_reset_vfid = func_id;
3029
3030         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3031         if (ret)
3032                 dev_err(&hdev->pdev->dev,
3033                         "send function reset cmd fail, status =%d\n", ret);
3034
3035         return ret;
3036 }
3037
3038 static void hclge_do_reset(struct hclge_dev *hdev)
3039 {
3040         struct hnae3_handle *handle = &hdev->vport[0].nic;
3041         struct pci_dev *pdev = hdev->pdev;
3042         u32 val;
3043
3044         if (hclge_get_hw_reset_stat(handle)) {
3045                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3046                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3047                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3048                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3049                 return;
3050         }
3051
3052         switch (hdev->reset_type) {
3053         case HNAE3_GLOBAL_RESET:
3054                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3055                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3056                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3057                 dev_info(&pdev->dev, "Global Reset requested\n");
3058                 break;
3059         case HNAE3_FUNC_RESET:
3060                 dev_info(&pdev->dev, "PF Reset requested\n");
3061                 /* schedule again to check later */
3062                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3063                 hclge_reset_task_schedule(hdev);
3064                 break;
3065         case HNAE3_FLR_RESET:
3066                 dev_info(&pdev->dev, "FLR requested\n");
3067                 /* schedule again to check later */
3068                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3069                 hclge_reset_task_schedule(hdev);
3070                 break;
3071         default:
3072                 dev_warn(&pdev->dev,
3073                          "Unsupported reset type: %d\n", hdev->reset_type);
3074                 break;
3075         }
3076 }
3077
3078 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3079                                                    unsigned long *addr)
3080 {
3081         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3082         struct hclge_dev *hdev = ae_dev->priv;
3083
3084         /* first, resolve any unknown reset type to the known type(s) */
3085         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3086                 /* we will intentionally ignore any errors from this function
3087                  * as we will end up in *some* reset request in any case
3088                  */
3089                 hclge_handle_hw_msix_error(hdev, addr);
3090                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3091                 /* We deferred the clearing of the error event which caused
3092                  * the interrupt since it was not possible to do that in
3093                  * interrupt context (and this is the reason we introduced the
3094                  * new UNKNOWN reset type). Now that the errors have been
3095                  * handled and cleared in hardware, we can safely re-enable
3096                  * interrupts. This is an exception to the norm.
3097                  */
3098                 hclge_enable_vector(&hdev->misc_vector, true);
3099         }
3100
3101         /* return the highest priority reset level amongst all */
3102         if (test_bit(HNAE3_IMP_RESET, addr)) {
3103                 rst_level = HNAE3_IMP_RESET;
3104                 clear_bit(HNAE3_IMP_RESET, addr);
3105                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3106                 clear_bit(HNAE3_FUNC_RESET, addr);
3107         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3108                 rst_level = HNAE3_GLOBAL_RESET;
3109                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3110                 clear_bit(HNAE3_FUNC_RESET, addr);
3111         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3112                 rst_level = HNAE3_FUNC_RESET;
3113                 clear_bit(HNAE3_FUNC_RESET, addr);
3114         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3115                 rst_level = HNAE3_FLR_RESET;
3116                 clear_bit(HNAE3_FLR_RESET, addr);
3117         }
3118
3119         if (hdev->reset_type != HNAE3_NONE_RESET &&
3120             rst_level < hdev->reset_type)
3121                 return HNAE3_NONE_RESET;
3122
3123         return rst_level;
3124 }
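
/* Worked example for the priority resolution above: if both HNAE3_GLOBAL_RESET
 * and HNAE3_FUNC_RESET are pending in *addr, HNAE3_GLOBAL_RESET is returned
 * and both bits are cleared, i.e. the lower level request is folded into the
 * higher one. Conversely, while a higher level reset (hdev->reset_type) is in
 * progress, a lower level request resolves to HNAE3_NONE_RESET and is simply
 * dropped, since the ongoing reset supersedes it.
 */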
3125
3126 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3127 {
3128         u32 clearval = 0;
3129
3130         switch (hdev->reset_type) {
3131         case HNAE3_IMP_RESET:
3132                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3133                 break;
3134         case HNAE3_GLOBAL_RESET:
3135                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3136                 break;
3137         default:
3138                 break;
3139         }
3140
3141         if (!clearval)
3142                 return;
3143
3144         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3145         hclge_enable_vector(&hdev->misc_vector, true);
3146 }
3147
3148 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3149 {
3150         int ret = 0;
3151
3152         switch (hdev->reset_type) {
3153         case HNAE3_FUNC_RESET:
3154                 /* fall through */
3155         case HNAE3_FLR_RESET:
3156                 ret = hclge_set_all_vf_rst(hdev, true);
3157                 break;
3158         default:
3159                 break;
3160         }
3161
3162         return ret;
3163 }
3164
3165 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3166 {
3167 #define HCLGE_RESET_SYNC_TIME 100
3168
3169         u32 reg_val;
3170         int ret = 0;
3171
3172         switch (hdev->reset_type) {
3173         case HNAE3_FUNC_RESET:
3174                 /* There is no mechanism for the PF to know if the VF has
3175                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3176                  */
3177                 msleep(HCLGE_RESET_SYNC_TIME);
3178                 ret = hclge_func_reset_cmd(hdev, 0);
3179                 if (ret) {
3180                         dev_err(&hdev->pdev->dev,
3181                                 "asserting function reset fail %d!\n", ret);
3182                         return ret;
3183                 }
3184
3185                 /* After performing PF reset, it is not necessary to do the
3186                  * mailbox handling or send any command to firmware, because
3187                  * any mailbox handling or command to firmware is only valid
3188                  * after hclge_cmd_init is called.
3189                  */
3190                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3191                 hdev->rst_stats.pf_rst_cnt++;
3192                 break;
3193         case HNAE3_FLR_RESET:
3194                 /* There is no mechanism for the PF to know if the VF has
3195                  * stopped IO; for now, just wait 100 ms for the VF to stop IO
3196                  */
3197                 msleep(HCLGE_RESET_SYNC_TIME);
3198                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3199                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3200                 hdev->rst_stats.flr_rst_cnt++;
3201                 break;
3202         case HNAE3_IMP_RESET:
3203                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3204                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3205                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3206                 break;
3207         default:
3208                 break;
3209         }
3210
3211         /* inform hardware that preparatory work is done */
3212         msleep(HCLGE_RESET_SYNC_TIME);
3213         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3214                         HCLGE_NIC_CMQ_ENABLE);
3215         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3216
3217         return ret;
3218 }
3219
3220 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3221 {
3222 #define MAX_RESET_FAIL_CNT 5
3223
3224         if (hdev->reset_pending) {
3225                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3226                          hdev->reset_pending);
3227                 return true;
3228         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3229                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3230                     BIT(HCLGE_IMP_RESET_BIT))) {
3231                 dev_info(&hdev->pdev->dev,
3232                          "reset failed because IMP Reset is pending\n");
3233                 hclge_clear_reset_cause(hdev);
3234                 return false;
3235         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3236                 hdev->reset_fail_cnt++;
3237                 if (is_timeout) {
3238                         set_bit(hdev->reset_type, &hdev->reset_pending);
3239                         dev_info(&hdev->pdev->dev,
3240                                  "re-schedule to wait for hw reset done\n");
3241                         return true;
3242                 }
3243
3244                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3245                 hclge_clear_reset_cause(hdev);
3246                 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3247                 mod_timer(&hdev->reset_timer,
3248                           jiffies + HCLGE_RESET_INTERVAL);
3249
3250                 return false;
3251         }
3252
3253         hclge_clear_reset_cause(hdev);
3254         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3255         return false;
3256 }
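
/* Outcomes of the error handling above: return true to re-schedule the reset
 * task, either because another reset is already pending or because the wait
 * timed out and we keep waiting for hardware; return false when an IMP reset
 * is about to take over, when the failure has been escalated to a global
 * reset via default_reset_request and the reset timer, or when
 * MAX_RESET_FAIL_CNT failures have been reached and we give up.
 */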
3257
3258 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3259 {
3260         int ret = 0;
3261
3262         switch (hdev->reset_type) {
3263         case HNAE3_FUNC_RESET:
3264                 /* fall through */
3265         case HNAE3_FLR_RESET:
3266                 ret = hclge_set_all_vf_rst(hdev, false);
3267                 break;
3268         default:
3269                 break;
3270         }
3271
3272         return ret;
3273 }
3274
3275 static int hclge_reset_stack(struct hclge_dev *hdev)
3276 {
3277         int ret;
3278
3279         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3280         if (ret)
3281                 return ret;
3282
3283         ret = hclge_reset_ae_dev(hdev->ae_dev);
3284         if (ret)
3285                 return ret;
3286
3287         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3288         if (ret)
3289                 return ret;
3290
3291         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3292 }
3293
3294 static void hclge_reset(struct hclge_dev *hdev)
3295 {
3296         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3297         bool is_timeout = false;
3298         int ret;
3299
3300         /* Initialize ae_dev reset status as well, in case enet layer wants to
3301          * know if device is undergoing reset
3302          */
3303         ae_dev->reset_type = hdev->reset_type;
3304         hdev->rst_stats.reset_cnt++;
3305         /* perform reset of the stack & ae device for a client */
3306         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3307         if (ret)
3308                 goto err_reset;
3309
3310         ret = hclge_reset_prepare_down(hdev);
3311         if (ret)
3312                 goto err_reset;
3313
3314         rtnl_lock();
3315         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3316         if (ret)
3317                 goto err_reset_lock;
3318
3319         rtnl_unlock();
3320
3321         ret = hclge_reset_prepare_wait(hdev);
3322         if (ret)
3323                 goto err_reset;
3324
3325         if (hclge_reset_wait(hdev)) {
3326                 is_timeout = true;
3327                 goto err_reset;
3328         }
3329
3330         hdev->rst_stats.hw_reset_done_cnt++;
3331
3332         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3333         if (ret)
3334                 goto err_reset;
3335
3336         rtnl_lock();
3337
3338         ret = hclge_reset_stack(hdev);
3339         if (ret)
3340                 goto err_reset_lock;
3341
3342         hclge_clear_reset_cause(hdev);
3343
3344         ret = hclge_reset_prepare_up(hdev);
3345         if (ret)
3346                 goto err_reset_lock;
3347
3348         rtnl_unlock();
3349
3350         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3351         /* ignore the RoCE notify error only if it has already failed
3352          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3353          */
3354         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3355                 goto err_reset;
3356
3357         rtnl_lock();
3358
3359         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3360         if (ret)
3361                 goto err_reset_lock;
3362
3363         rtnl_unlock();
3364
3365         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3366         if (ret)
3367                 goto err_reset;
3368
3369         hdev->last_reset_time = jiffies;
3370         hdev->reset_fail_cnt = 0;
3371         hdev->rst_stats.reset_done_cnt++;
3372         ae_dev->reset_type = HNAE3_NONE_RESET;
3373         del_timer(&hdev->reset_timer);
3374
3375         return;
3376
3377 err_reset_lock:
3378         rtnl_unlock();
3379 err_reset:
3380         if (hclge_reset_err_handle(hdev, is_timeout))
3381                 hclge_reset_task_schedule(hdev);
3382 }
3383
3384 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3385 {
3386         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3387         struct hclge_dev *hdev = ae_dev->priv;
3388
3389         /* We might end up getting called for one of the two cases below:
3390          * 1. A recoverable error was conveyed through APEI and the only way
3391          *    to bring back normalcy is to reset.
3392          * 2. A new reset request from the stack due to timeout.
3393          *
3394          * For the first case, the error event might not have an ae handle
3395          * available. Check whether this is a new reset request and we are not
3396          * here just because the last reset attempt did not succeed and the
3397          * watchdog hit us again. We know it is a new request if the last one
3398          * did not occur very recently (watchdog timer = 5*HZ, so check after a
3399          * sufficiently long time, say 4*5*HZ). For a new request we reset the
3400          * "reset level" to PF reset. If it is a repeat of the most recent
3401          * request, we throttle it and do not allow another reset before
3402          * HCLGE_RESET_INTERVAL has elapsed.
3403          */
3404         if (!handle)
3405                 handle = &hdev->vport[0].nic;
3406
3407         if (time_before(jiffies, (hdev->last_reset_time +
3408                                   HCLGE_RESET_INTERVAL)))
3409                 return;
3410         else if (hdev->default_reset_request)
3411                 hdev->reset_level =
3412                         hclge_get_reset_level(ae_dev,
3413                                               &hdev->default_reset_request);
3414         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3415                 hdev->reset_level = HNAE3_FUNC_RESET;
3416
3417         dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3418                  hdev->reset_level);
3419
3420         /* request reset & schedule reset task */
3421         set_bit(hdev->reset_level, &hdev->reset_request);
3422         hclge_reset_task_schedule(hdev);
3423
3424         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3425                 hdev->reset_level++;
3426 }
3427
3428 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3429                                         enum hnae3_reset_type rst_type)
3430 {
3431         struct hclge_dev *hdev = ae_dev->priv;
3432
3433         set_bit(rst_type, &hdev->default_reset_request);
3434 }
3435
3436 static void hclge_reset_timer(struct timer_list *t)
3437 {
3438         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3439
3440         dev_info(&hdev->pdev->dev,
3441                  "triggering reset in reset timer\n");
3442         hclge_reset_event(hdev->pdev, NULL);
3443 }
3444
3445 static void hclge_reset_subtask(struct hclge_dev *hdev)
3446 {
3447         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3448
3449         /* Check if there is any ongoing reset in the hardware. This status can
3450          * be checked from reset_pending. If there is one, we need to wait for
3451          * the hardware to complete the reset:
3452          *    a. If we are able to figure out in a reasonable time that the
3453          *       hardware has fully reset, we can proceed with the driver and
3454          *       client reset.
3455          *    b. Otherwise, we can come back later to check this status, so
3456          *       re-schedule now.
3457          */
3458         hdev->last_reset_time = jiffies;
3459         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3460         if (hdev->reset_type != HNAE3_NONE_RESET)
3461                 hclge_reset(hdev);
3462
3463         /* check if we got any *new* reset requests to be honored */
3464         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3465         if (hdev->reset_type != HNAE3_NONE_RESET)
3466                 hclge_do_reset(hdev);
3467
3468         hdev->reset_type = HNAE3_NONE_RESET;
3469 }
3470
3471 static void hclge_reset_service_task(struct work_struct *work)
3472 {
3473         struct hclge_dev *hdev =
3474                 container_of(work, struct hclge_dev, rst_service_task);
3475
3476         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3477                 return;
3478
3479         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3480
3481         hclge_reset_subtask(hdev);
3482
3483         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3484 }
3485
3486 static void hclge_mailbox_service_task(struct work_struct *work)
3487 {
3488         struct hclge_dev *hdev =
3489                 container_of(work, struct hclge_dev, mbx_service_task);
3490
3491         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3492                 return;
3493
3494         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3495
3496         hclge_mbx_handler(hdev);
3497
3498         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3499 }
3500
3501 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3502 {
3503         int i;
3504
3505         /* start from vport 1 because the PF (vport 0) is always alive */
3506         for (i = 1; i < hdev->num_alloc_vport; i++) {
3507                 struct hclge_vport *vport = &hdev->vport[i];
3508
3509                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3510                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3511
3512                 /* If the vf is not alive, reset its mps to the default value */
3513                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3514                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3515         }
3516 }
3517
3518 static void hclge_service_task(struct work_struct *work)
3519 {
3520         struct hclge_dev *hdev =
3521                 container_of(work, struct hclge_dev, service_task);
3522
3523         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3524                 hclge_update_stats_for_all(hdev);
3525                 hdev->hw_stats.stats_timer = 0;
3526         }
3527
3528         hclge_update_port_info(hdev);
3529         hclge_update_link_status(hdev);
3530         hclge_update_vport_alive(hdev);
3531         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3532                 hclge_rfs_filter_expire(hdev);
3533                 hdev->fd_arfs_expire_timer = 0;
3534         }
3535         hclge_service_complete(hdev);
3536 }
3537
3538 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3539 {
3540         /* VF handle has no client */
3541         if (!handle->client)
3542                 return container_of(handle, struct hclge_vport, nic);
3543         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3544                 return container_of(handle, struct hclge_vport, roce);
3545         else
3546                 return container_of(handle, struct hclge_vport, nic);
3547 }
3548
3549 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3550                             struct hnae3_vector_info *vector_info)
3551 {
3552         struct hclge_vport *vport = hclge_get_vport(handle);
3553         struct hnae3_vector_info *vector = vector_info;
3554         struct hclge_dev *hdev = vport->back;
3555         int alloc = 0;
3556         int i, j;
3557
3558         vector_num = min(hdev->num_msi_left, vector_num);
3559
3560         for (j = 0; j < vector_num; j++) {
3561                 for (i = 1; i < hdev->num_msi; i++) {
3562                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3563                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3564                                 vector->io_addr = hdev->hw.io_base +
3565                                         HCLGE_VECTOR_REG_BASE +
3566                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3567                                         vport->vport_id *
3568                                         HCLGE_VECTOR_VF_OFFSET;
3569                                 hdev->vector_status[i] = vport->vport_id;
3570                                 hdev->vector_irq[i] = vector->vector;
3571
3572                                 vector++;
3573                                 alloc++;
3574
3575                                 break;
3576                         }
3577                 }
3578         }
3579         hdev->num_msi_left -= alloc;
3580         hdev->num_msi_used += alloc;
3581
3582         return alloc;
3583 }
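
/* Vector layout assumed by the allocation above: vector 0 is reserved for the
 * misc (mailbox/reset/error) interrupt, so ring vectors are handed out from
 * index 1 upwards. The per-vector register address returned in io_addr is
 * io_base + HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET +
 * vport_id * HCLGE_VECTOR_VF_OFFSET.
 */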
3584
3585 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3586 {
3587         int i;
3588
3589         for (i = 0; i < hdev->num_msi; i++)
3590                 if (vector == hdev->vector_irq[i])
3591                         return i;
3592
3593         return -EINVAL;
3594 }
3595
3596 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3597 {
3598         struct hclge_vport *vport = hclge_get_vport(handle);
3599         struct hclge_dev *hdev = vport->back;
3600         int vector_id;
3601
3602         vector_id = hclge_get_vector_index(hdev, vector);
3603         if (vector_id < 0) {
3604                 dev_err(&hdev->pdev->dev,
3605                         "Get vector index fail. vector_id =%d\n", vector_id);
3606                 return vector_id;
3607         }
3608
3609         hclge_free_vector(hdev, vector_id);
3610
3611         return 0;
3612 }
3613
3614 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3615 {
3616         return HCLGE_RSS_KEY_SIZE;
3617 }
3618
3619 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3620 {
3621         return HCLGE_RSS_IND_TBL_SIZE;
3622 }
3623
3624 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3625                                   const u8 hfunc, const u8 *key)
3626 {
3627         struct hclge_rss_config_cmd *req;
3628         unsigned int key_offset = 0;
3629         struct hclge_desc desc;
3630         int key_counts;
3631         int key_size;
3632         int ret;
3633
3634         key_counts = HCLGE_RSS_KEY_SIZE;
3635         req = (struct hclge_rss_config_cmd *)desc.data;
3636
3637         while (key_counts) {
3638                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3639                                            false);
3640
3641                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3642                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3643
3644                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3645                 memcpy(req->hash_key,
3646                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3647
3648                 key_counts -= key_size;
3649                 key_offset++;
3650                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3651                 if (ret) {
3652                         dev_err(&hdev->pdev->dev,
3653                                 "Configure RSS config fail, status = %d\n",
3654                                 ret);
3655                         return ret;
3656                 }
3657         }
3658         return 0;
3659 }
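
/* The hash key is longer than a single descriptor can carry, so it is written
 * in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, with the chunk index encoded in
 * hash_config via HCLGE_RSS_HASH_KEY_OFFSET_B alongside the hash algorithm.
 * For example, a 40 byte key split into 16 byte chunks (the sizes these
 * defines are expected to carry, though that is an assumption here) goes out
 * as three commands of 16, 16 and 8 bytes.
 */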
3660
3661 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3662 {
3663         struct hclge_rss_indirection_table_cmd *req;
3664         struct hclge_desc desc;
3665         int i, j;
3666         int ret;
3667
3668         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3669
3670         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3671                 hclge_cmd_setup_basic_desc
3672                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3673
3674                 req->start_table_index =
3675                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3676                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3677
3678                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3679                         req->rss_result[j] =
3680                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3681
3682                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3683                 if (ret) {
3684                         dev_err(&hdev->pdev->dev,
3685                                 "Configure rss indir table fail,status = %d\n",
3686                                 ret);
3687                         return ret;
3688                 }
3689         }
3690         return 0;
3691 }
3692
3693 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3694                                  u16 *tc_size, u16 *tc_offset)
3695 {
3696         struct hclge_rss_tc_mode_cmd *req;
3697         struct hclge_desc desc;
3698         int ret;
3699         int i;
3700
3701         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3702         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3703
3704         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3705                 u16 mode = 0;
3706
3707                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3708                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3709                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3710                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3711                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3712
3713                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3714         }
3715
3716         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3717         if (ret)
3718                 dev_err(&hdev->pdev->dev,
3719                         "Configure rss tc mode fail, status = %d\n", ret);
3720
3721         return ret;
3722 }
3723
3724 static void hclge_get_rss_type(struct hclge_vport *vport)
3725 {
3726         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3727             vport->rss_tuple_sets.ipv4_udp_en ||
3728             vport->rss_tuple_sets.ipv4_sctp_en ||
3729             vport->rss_tuple_sets.ipv6_tcp_en ||
3730             vport->rss_tuple_sets.ipv6_udp_en ||
3731             vport->rss_tuple_sets.ipv6_sctp_en)
3732                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3733         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3734                  vport->rss_tuple_sets.ipv6_fragment_en)
3735                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3736         else
3737                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3738 }
3739
3740 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3741 {
3742         struct hclge_rss_input_tuple_cmd *req;
3743         struct hclge_desc desc;
3744         int ret;
3745
3746         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3747
3748         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3749
3750         /* Get the tuple cfg from pf */
3751         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3752         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3753         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3754         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3755         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3756         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3757         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3758         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3759         hclge_get_rss_type(&hdev->vport[0]);
3760         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3761         if (ret)
3762                 dev_err(&hdev->pdev->dev,
3763                         "Configure rss input fail, status = %d\n", ret);
3764         return ret;
3765 }
3766
3767 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3768                          u8 *key, u8 *hfunc)
3769 {
3770         struct hclge_vport *vport = hclge_get_vport(handle);
3771         int i;
3772
3773         /* Get hash algorithm */
3774         if (hfunc) {
3775                 switch (vport->rss_algo) {
3776                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3777                         *hfunc = ETH_RSS_HASH_TOP;
3778                         break;
3779                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3780                         *hfunc = ETH_RSS_HASH_XOR;
3781                         break;
3782                 default:
3783                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3784                         break;
3785                 }
3786         }
3787
3788         /* Get the RSS Key required by the user */
3789         if (key)
3790                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3791
3792         /* Get indirect table */
3793         if (indir)
3794                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3795                         indir[i] =  vport->rss_indirection_tbl[i];
3796
3797         return 0;
3798 }
3799
3800 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3801                          const  u8 *key, const  u8 hfunc)
3802 {
3803         struct hclge_vport *vport = hclge_get_vport(handle);
3804         struct hclge_dev *hdev = vport->back;
3805         u8 hash_algo;
3806         int ret, i;
3807
3808         /* Set the RSS Hash Key if specified by the user */
3809         if (key) {
3810                 switch (hfunc) {
3811                 case ETH_RSS_HASH_TOP:
3812                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3813                         break;
3814                 case ETH_RSS_HASH_XOR:
3815                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3816                         break;
3817                 case ETH_RSS_HASH_NO_CHANGE:
3818                         hash_algo = vport->rss_algo;
3819                         break;
3820                 default:
3821                         return -EINVAL;
3822                 }
3823
3824                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3825                 if (ret)
3826                         return ret;
3827
3828                 /* Update the shadow RSS key with the user specified key */
3829                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3830                 vport->rss_algo = hash_algo;
3831         }
3832
3833         /* Update the shadow RSS table with user specified qids */
3834         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3835                 vport->rss_indirection_tbl[i] = indir[i];
3836
3837         /* Update the hardware */
3838         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3839 }
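
/* These get_rss()/set_rss() callbacks are what ultimately back the ethtool RSS
 * interface for this device, roughly "ethtool -x <dev>" to read the table and
 * key and "ethtool -X <dev> hfunc toeplitz hkey <40 hex bytes>" to program
 * them, with the request passed down through the hnae3 handle (exact command
 * syntax depends on the ethtool version).
 */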
3840
3841 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3842 {
3843         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3844
3845         if (nfc->data & RXH_L4_B_2_3)
3846                 hash_sets |= HCLGE_D_PORT_BIT;
3847         else
3848                 hash_sets &= ~HCLGE_D_PORT_BIT;
3849
3850         if (nfc->data & RXH_IP_SRC)
3851                 hash_sets |= HCLGE_S_IP_BIT;
3852         else
3853                 hash_sets &= ~HCLGE_S_IP_BIT;
3854
3855         if (nfc->data & RXH_IP_DST)
3856                 hash_sets |= HCLGE_D_IP_BIT;
3857         else
3858                 hash_sets &= ~HCLGE_D_IP_BIT;
3859
3860         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3861                 hash_sets |= HCLGE_V_TAG_BIT;
3862
3863         return hash_sets;
3864 }
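
/* Mapping performed above: RXH_IP_SRC -> HCLGE_S_IP_BIT, RXH_IP_DST ->
 * HCLGE_D_IP_BIT, RXH_L4_B_0_1 -> HCLGE_S_PORT_BIT and RXH_L4_B_2_3 ->
 * HCLGE_D_PORT_BIT; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */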
3865
3866 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3867                                struct ethtool_rxnfc *nfc)
3868 {
3869         struct hclge_vport *vport = hclge_get_vport(handle);
3870         struct hclge_dev *hdev = vport->back;
3871         struct hclge_rss_input_tuple_cmd *req;
3872         struct hclge_desc desc;
3873         u8 tuple_sets;
3874         int ret;
3875
3876         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3877                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3878                 return -EINVAL;
3879
3880         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3882
3883         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3884         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3885         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3886         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3887         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3888         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3889         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3890         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3891
3892         tuple_sets = hclge_get_rss_hash_bits(nfc);
3893         switch (nfc->flow_type) {
3894         case TCP_V4_FLOW:
3895                 req->ipv4_tcp_en = tuple_sets;
3896                 break;
3897         case TCP_V6_FLOW:
3898                 req->ipv6_tcp_en = tuple_sets;
3899                 break;
3900         case UDP_V4_FLOW:
3901                 req->ipv4_udp_en = tuple_sets;
3902                 break;
3903         case UDP_V6_FLOW:
3904                 req->ipv6_udp_en = tuple_sets;
3905                 break;
3906         case SCTP_V4_FLOW:
3907                 req->ipv4_sctp_en = tuple_sets;
3908                 break;
3909         case SCTP_V6_FLOW:
3910                 if ((nfc->data & RXH_L4_B_0_1) ||
3911                     (nfc->data & RXH_L4_B_2_3))
3912                         return -EINVAL;
3913
3914                 req->ipv6_sctp_en = tuple_sets;
3915                 break;
3916         case IPV4_FLOW:
3917                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3918                 break;
3919         case IPV6_FLOW:
3920                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3921                 break;
3922         default:
3923                 return -EINVAL;
3924         }
3925
3926         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3927         if (ret) {
3928                 dev_err(&hdev->pdev->dev,
3929                         "Set rss tuple fail, status = %d\n", ret);
3930                 return ret;
3931         }
3932
3933         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3934         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3935         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3936         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3937         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3938         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3939         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3940         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3941         hclge_get_rss_type(vport);
3942         return 0;
3943 }
3944
3945 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3946                                struct ethtool_rxnfc *nfc)
3947 {
3948         struct hclge_vport *vport = hclge_get_vport(handle);
3949         u8 tuple_sets;
3950
3951         nfc->data = 0;
3952
3953         switch (nfc->flow_type) {
3954         case TCP_V4_FLOW:
3955                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3956                 break;
3957         case UDP_V4_FLOW:
3958                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3959                 break;
3960         case TCP_V6_FLOW:
3961                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3962                 break;
3963         case UDP_V6_FLOW:
3964                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3965                 break;
3966         case SCTP_V4_FLOW:
3967                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3968                 break;
3969         case SCTP_V6_FLOW:
3970                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3971                 break;
3972         case IPV4_FLOW:
3973         case IPV6_FLOW:
3974                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3975                 break;
3976         default:
3977                 return -EINVAL;
3978         }
3979
3980         if (!tuple_sets)
3981                 return 0;
3982
3983         if (tuple_sets & HCLGE_D_PORT_BIT)
3984                 nfc->data |= RXH_L4_B_2_3;
3985         if (tuple_sets & HCLGE_S_PORT_BIT)
3986                 nfc->data |= RXH_L4_B_0_1;
3987         if (tuple_sets & HCLGE_D_IP_BIT)
3988                 nfc->data |= RXH_IP_DST;
3989         if (tuple_sets & HCLGE_S_IP_BIT)
3990                 nfc->data |= RXH_IP_SRC;
3991
3992         return 0;
3993 }
3994
3995 static int hclge_get_tc_size(struct hnae3_handle *handle)
3996 {
3997         struct hclge_vport *vport = hclge_get_vport(handle);
3998         struct hclge_dev *hdev = vport->back;
3999
4000         return hdev->rss_size_max;
4001 }
4002
4003 int hclge_rss_init_hw(struct hclge_dev *hdev)
4004 {
4005         struct hclge_vport *vport = hdev->vport;
4006         u8 *rss_indir = vport[0].rss_indirection_tbl;
4007         u16 rss_size = vport[0].alloc_rss_size;
4008         u8 *key = vport[0].rss_hash_key;
4009         u8 hfunc = vport[0].rss_algo;
4010         u16 tc_offset[HCLGE_MAX_TC_NUM];
4011         u16 tc_valid[HCLGE_MAX_TC_NUM];
4012         u16 tc_size[HCLGE_MAX_TC_NUM];
4013         u16 roundup_size;
4014         unsigned int i;
4015         int ret;
4016
4017         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4018         if (ret)
4019                 return ret;
4020
4021         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4022         if (ret)
4023                 return ret;
4024
4025         ret = hclge_set_rss_input_tuple(hdev);
4026         if (ret)
4027                 return ret;
4028
4029         /* Each TC has the same queue size, and the tc_size set to hardware is
4030          * the log2 of the roundup power of two of rss_size; the actual queue
4031          * size is limited by the indirection table.
4032          */
4033         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4034                 dev_err(&hdev->pdev->dev,
4035                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4036                         rss_size);
4037                 return -EINVAL;
4038         }
4039
4040         roundup_size = roundup_pow_of_two(rss_size);
4041         roundup_size = ilog2(roundup_size);
4042
4043         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4044                 tc_valid[i] = 0;
4045
4046                 if (!(hdev->hw_tc_map & BIT(i)))
4047                         continue;
4048
4049                 tc_valid[i] = 1;
4050                 tc_size[i] = roundup_size;
4051                 tc_offset[i] = rss_size * i;
4052         }
4053
4054         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4055 }
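
/* Worked example for the tc_size/tc_offset maths above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC gets
 * tc_size = 5 (the hardware treats it as 2^5 = 32 queues wide, the real queue
 * count being limited by the indirection table) and tc_offset[i] = 24 * i,
 * the first queue of that TC.
 */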
4056
4057 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4058 {
4059         struct hclge_vport *vport = hdev->vport;
4060         int i, j;
4061
4062         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4063                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4064                         vport[j].rss_indirection_tbl[i] =
4065                                 i % vport[j].alloc_rss_size;
4066         }
4067 }
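
/* Example of the default table built above: with alloc_rss_size = 8 the
 * indirection entries become 0, 1, ..., 7, 0, 1, ... so flows are spread
 * evenly across the vport's RSS queues.
 */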
4068
4069 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4070 {
4071         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4072         struct hclge_vport *vport = hdev->vport;
4073
4074         if (hdev->pdev->revision >= 0x21)
4075                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4076
4077         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4078                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4079                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4080                 vport[i].rss_tuple_sets.ipv4_udp_en =
4081                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4082                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4083                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4084                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4085                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4086                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4087                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4088                 vport[i].rss_tuple_sets.ipv6_udp_en =
4089                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4090                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4091                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4092                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4093                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4094
4095                 vport[i].rss_algo = rss_algo;
4096
4097                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4098                        HCLGE_RSS_KEY_SIZE);
4099         }
4100
4101         hclge_rss_indir_init_cfg(hdev);
4102 }
4103
4104 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4105                                 int vector_id, bool en,
4106                                 struct hnae3_ring_chain_node *ring_chain)
4107 {
4108         struct hclge_dev *hdev = vport->back;
4109         struct hnae3_ring_chain_node *node;
4110         struct hclge_desc desc;
4111         struct hclge_ctrl_vector_chain_cmd *req
4112                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4113         enum hclge_cmd_status status;
4114         enum hclge_opcode_type op;
4115         u16 tqp_type_and_id;
4116         int i;
4117
4118         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4119         hclge_cmd_setup_basic_desc(&desc, op, false);
4120         req->int_vector_id = vector_id;
4121
4122         i = 0;
4123         for (node = ring_chain; node; node = node->next) {
4124                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4125                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4126                                 HCLGE_INT_TYPE_S,
4127                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4128                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4129                                 HCLGE_TQP_ID_S, node->tqp_index);
4130                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4131                                 HCLGE_INT_GL_IDX_S,
4132                                 hnae3_get_field(node->int_gl_idx,
4133                                                 HNAE3_RING_GL_IDX_M,
4134                                                 HNAE3_RING_GL_IDX_S));
4135                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4136                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4137                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4138                         req->vfid = vport->vport_id;
4139
4140                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4141                         if (status) {
4142                                 dev_err(&hdev->pdev->dev,
4143                                         "Map TQP fail, status is %d.\n",
4144                                         status);
4145                                 return -EIO;
4146                         }
4147                         i = 0;
4148
4149                         hclge_cmd_setup_basic_desc(&desc,
4150                                                    op,
4151                                                    false);
4152                         req->int_vector_id = vector_id;
4153                 }
4154         }
4155
4156         if (i > 0) {
4157                 req->int_cause_num = i;
4158                 req->vfid = vport->vport_id;
4159                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4160                 if (status) {
4161                         dev_err(&hdev->pdev->dev,
4162                                 "Map TQP fail, status is %d.\n", status);
4163                         return -EIO;
4164                 }
4165         }
4166
4167         return 0;
4168 }
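
/* The ring chain is flushed to firmware in batches: each descriptor carries up
 * to HCLGE_VECTOR_ELEMENTS_PER_CMD tqp_type_and_id entries, a full descriptor
 * is sent as soon as it fills up, and any remainder (i > 0) is sent once the
 * whole chain has been walked.
 */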
4169
4170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4171                                     struct hnae3_ring_chain_node *ring_chain)
4172 {
4173         struct hclge_vport *vport = hclge_get_vport(handle);
4174         struct hclge_dev *hdev = vport->back;
4175         int vector_id;
4176
4177         vector_id = hclge_get_vector_index(hdev, vector);
4178         if (vector_id < 0) {
4179                 dev_err(&hdev->pdev->dev,
4180                         "Get vector index fail. vector_id =%d\n", vector_id);
4181                 return vector_id;
4182         }
4183
4184         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4185 }
4186
4187 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4188                                        struct hnae3_ring_chain_node *ring_chain)
4189 {
4190         struct hclge_vport *vport = hclge_get_vport(handle);
4191         struct hclge_dev *hdev = vport->back;
4192         int vector_id, ret;
4193
4194         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4195                 return 0;
4196
4197         vector_id = hclge_get_vector_index(hdev, vector);
4198         if (vector_id < 0) {
4199                 dev_err(&handle->pdev->dev,
4200                         "Get vector index fail. ret =%d\n", vector_id);
4201                 return vector_id;
4202         }
4203
4204         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4205         if (ret)
4206                 dev_err(&handle->pdev->dev,
4207                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4208                         vector_id, ret);
4209
4210         return ret;
4211 }
4212
4213 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4214                                struct hclge_promisc_param *param)
4215 {
4216         struct hclge_promisc_cfg_cmd *req;
4217         struct hclge_desc desc;
4218         int ret;
4219
4220         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4221
4222         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4223         req->vf_id = param->vf_id;
4224
4225         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4226          * pdev revision(0x20); newer revisions support them. Setting these
4227          * two fields does not cause the firmware to return an error on
4228          * revision(0x20), so they can be set unconditionally.
4229          */
4230         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4231                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4232
4233         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4234         if (ret)
4235                 dev_err(&hdev->pdev->dev,
4236                         "Set promisc mode fail, status is %d.\n", ret);
4237
4238         return ret;
4239 }
4240
4241 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4242                               bool en_mc, bool en_bc, int vport_id)
4243 {
4244         if (!param)
4245                 return;
4246
4247         memset(param, 0, sizeof(struct hclge_promisc_param));
4248         if (en_uc)
4249                 param->enable = HCLGE_PROMISC_EN_UC;
4250         if (en_mc)
4251                 param->enable |= HCLGE_PROMISC_EN_MC;
4252         if (en_bc)
4253                 param->enable |= HCLGE_PROMISC_EN_BC;
4254         param->vf_id = vport_id;
4255 }
4256
4257 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4258                                   bool en_mc_pmc)
4259 {
4260         struct hclge_vport *vport = hclge_get_vport(handle);
4261         struct hclge_dev *hdev = vport->back;
4262         struct hclge_promisc_param param;
4263         bool en_bc_pmc = true;
4264
4265         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4266          * is always bypassed. So broadcast promisc should be disabled until
4267          * the user enables promisc mode.
4268          */
4269         if (handle->pdev->revision == 0x20)
4270                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4271
4272         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4273                                  vport->vport_id);
4274         return hclge_cmd_set_promisc_mode(hdev, &param);
4275 }
4276
4277 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4278 {
4279         struct hclge_get_fd_mode_cmd *req;
4280         struct hclge_desc desc;
4281         int ret;
4282
4283         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4284
4285         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4286
4287         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4288         if (ret) {
4289                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4290                 return ret;
4291         }
4292
4293         *fd_mode = req->mode;
4294
4295         return ret;
4296 }
4297
4298 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4299                                    u32 *stage1_entry_num,
4300                                    u32 *stage2_entry_num,
4301                                    u16 *stage1_counter_num,
4302                                    u16 *stage2_counter_num)
4303 {
4304         struct hclge_get_fd_allocation_cmd *req;
4305         struct hclge_desc desc;
4306         int ret;
4307
4308         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4309
4310         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4311
4312         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4313         if (ret) {
4314                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4315                         ret);
4316                 return ret;
4317         }
4318
4319         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4320         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4321         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4322         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4323
4324         return ret;
4325 }
4326
4327 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4328 {
4329         struct hclge_set_fd_key_config_cmd *req;
4330         struct hclge_fd_key_cfg *stage;
4331         struct hclge_desc desc;
4332         int ret;
4333
4334         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4335
4336         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4337         stage = &hdev->fd_cfg.key_cfg[stage_num];
4338         req->stage = stage_num;
4339         req->key_select = stage->key_sel;
4340         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4341         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4342         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4343         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4344         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4345         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4346
4347         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4348         if (ret)
4349                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4350
4351         return ret;
4352 }
4353
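/* Query the flow director mode and allocation from firmware and set up the
 * stage 1 key configuration (active tuples and meta data fields).
 */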
4354 static int hclge_init_fd_config(struct hclge_dev *hdev)
4355 {
4356 #define LOW_2_WORDS             0x03
4357         struct hclge_fd_key_cfg *key_cfg;
4358         int ret;
4359
4360         if (!hnae3_dev_fd_supported(hdev))
4361                 return 0;
4362
4363         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4364         if (ret)
4365                 return ret;
4366
4367         switch (hdev->fd_cfg.fd_mode) {
4368         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4369                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4370                 break;
4371         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4372                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4373                 break;
4374         default:
4375                 dev_err(&hdev->pdev->dev,
4376                         "Unsupported flow director mode %d\n",
4377                         hdev->fd_cfg.fd_mode);
4378                 return -EOPNOTSUPP;
4379         }
4380
4381         hdev->fd_cfg.proto_support =
4382                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4383                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4384         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4385         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4386         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4387         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4388         key_cfg->outer_sipv6_word_en = 0;
4389         key_cfg->outer_dipv6_word_en = 0;
4390
4391         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4392                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4393                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4394                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4395
4396         /* If the max 400bit key is used, tuples for ether type can be supported */
4397         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4398                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4399                 key_cfg->tuple_active |=
4400                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4401         }
4402
4403         /* roce_type is used to filter roce frames
4404          * dst_vport is used to specify the rule
4405          */
4406         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4407
4408         ret = hclge_get_fd_allocation(hdev,
4409                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4410                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4411                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4412                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4413         if (ret)
4414                 return ret;
4415
4416         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4417 }
4418
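/* Write or invalidate one flow director TCAM entry at @loc. The key bytes are
 * split across three command descriptors (req1/req2/req3); req1 additionally
 * carries the stage, x/y key selection, entry index and valid flag.
 */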
4419 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4420                                 int loc, u8 *key, bool is_add)
4421 {
4422         struct hclge_fd_tcam_config_1_cmd *req1;
4423         struct hclge_fd_tcam_config_2_cmd *req2;
4424         struct hclge_fd_tcam_config_3_cmd *req3;
4425         struct hclge_desc desc[3];
4426         int ret;
4427
4428         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4429         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4430         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4431         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4432         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4433
4434         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4435         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4436         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4437
4438         req1->stage = stage;
4439         req1->xy_sel = sel_x ? 1 : 0;
4440         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4441         req1->index = cpu_to_le32(loc);
4442         req1->entry_vld = sel_x ? is_add : 0;
4443
4444         if (key) {
4445                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4446                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4447                        sizeof(req2->tcam_data));
4448                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4449                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4450         }
4451
4452         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4453         if (ret)
4454                 dev_err(&hdev->pdev->dev,
4455                         "config tcam key fail, ret=%d\n",
4456                         ret);
4457
4458         return ret;
4459 }
4460
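/* Configure the action data for the flow director entry at @loc. The 64-bit
 * ad_data word is built in two halves: the rule id fields are set first and
 * shifted into the upper 32 bits, then the drop/queue/counter/next-stage
 * controls are set in the lower 32 bits.
 */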
4461 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4462                               struct hclge_fd_ad_data *action)
4463 {
4464         struct hclge_fd_ad_config_cmd *req;
4465         struct hclge_desc desc;
4466         u64 ad_data = 0;
4467         int ret;
4468
4469         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4470
4471         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4472         req->index = cpu_to_le32(loc);
4473         req->stage = stage;
4474
4475         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4476                       action->write_rule_id_to_bd);
4477         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4478                         action->rule_id);
4479         ad_data <<= 32;
4480         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4481         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4482                       action->forward_to_direct_queue);
4483         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4484                         action->queue_id);
4485         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4486         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4487                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4488         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4489         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4490                         action->next_input_key);
4491
4492         req->ad_data = cpu_to_le64(ad_data);
4493         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4494         if (ret)
4495                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4496
4497         return ret;
4498 }
4499
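/* Convert one tuple of the rule into its TCAM key_x/key_y representation
 * using calc_x()/calc_y(). Returns true if the tuple occupies key space (the
 * caller then advances the key cursors); an unused tuple also returns true
 * but leaves its key bytes zeroed.
 */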
4500 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4501                                    struct hclge_fd_rule *rule)
4502 {
4503         u16 tmp_x_s, tmp_y_s;
4504         u32 tmp_x_l, tmp_y_l;
4505         int i;
4506
4507         if (rule->unused_tuple & tuple_bit)
4508                 return true;
4509
4510         switch (tuple_bit) {
4511         case 0:
4512                 return false;
4513         case BIT(INNER_DST_MAC):
4514                 for (i = 0; i < ETH_ALEN; i++) {
4515                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4516                                rule->tuples_mask.dst_mac[i]);
4517                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4518                                rule->tuples_mask.dst_mac[i]);
4519                 }
4520
4521                 return true;
4522         case BIT(INNER_SRC_MAC):
4523                 for (i = 0; i < ETH_ALEN; i++) {
4524                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4525                                rule->tuples_mask.src_mac[i]);
4526                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4527                                rule->tuples_mask.src_mac[i]);
4528                 }
4529
4530                 return true;
4531         case BIT(INNER_VLAN_TAG_FST):
4532                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4533                        rule->tuples_mask.vlan_tag1);
4534                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4535                        rule->tuples_mask.vlan_tag1);
4536                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4537                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4538
4539                 return true;
4540         case BIT(INNER_ETH_TYPE):
4541                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4542                        rule->tuples_mask.ether_proto);
4543                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4544                        rule->tuples_mask.ether_proto);
4545                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4546                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4547
4548                 return true;
4549         case BIT(INNER_IP_TOS):
4550                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4551                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4552
4553                 return true;
4554         case BIT(INNER_IP_PROTO):
4555                 calc_x(*key_x, rule->tuples.ip_proto,
4556                        rule->tuples_mask.ip_proto);
4557                 calc_y(*key_y, rule->tuples.ip_proto,
4558                        rule->tuples_mask.ip_proto);
4559
4560                 return true;
4561         case BIT(INNER_SRC_IP):
4562                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4563                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4564                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4565                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4566                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4567                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4568
4569                 return true;
4570         case BIT(INNER_DST_IP):
4571                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4572                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4573                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4574                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4575                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4576                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4577
4578                 return true;
4579         case BIT(INNER_SRC_PORT):
4580                 calc_x(tmp_x_s, rule->tuples.src_port,
4581                        rule->tuples_mask.src_port);
4582                 calc_y(tmp_y_s, rule->tuples.src_port,
4583                        rule->tuples_mask.src_port);
4584                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4585                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4586
4587                 return true;
4588         case BIT(INNER_DST_PORT):
4589                 calc_x(tmp_x_s, rule->tuples.dst_port,
4590                        rule->tuples_mask.dst_port);
4591                 calc_y(tmp_y_s, rule->tuples.dst_port,
4592                        rule->tuples_mask.dst_port);
4593                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4594                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4595
4596                 return true;
4597         default:
4598                 return false;
4599         }
4600 }
4601
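/* Encode a port identifier for the meta data key: a host port packs the
 * PF id and VF id, a network port packs the network port id; the port type
 * bit distinguishes the two encodings.
 */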
4602 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4603                                  u8 vf_id, u8 network_port_id)
4604 {
4605         u32 port_number = 0;
4606
4607         if (port_type == HOST_PORT) {
4608                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4609                                 pf_id);
4610                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4611                                 vf_id);
4612                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4613         } else {
4614                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4615                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4616                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4617         }
4618
4619         return port_number;
4620 }
4621
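/* Build the meta data portion of the key from the active meta data fields
 * (packet type and destination vport), convert it with calc_x()/calc_y() and
 * shift the used bits up to the most significant end of the 32-bit word.
 */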
4622 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4623                                        __le32 *key_x, __le32 *key_y,
4624                                        struct hclge_fd_rule *rule)
4625 {
4626         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4627         u8 cur_pos = 0, tuple_size, shift_bits;
4628         unsigned int i;
4629
4630         for (i = 0; i < MAX_META_DATA; i++) {
4631                 tuple_size = meta_data_key_info[i].key_length;
4632                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4633
4634                 switch (tuple_bit) {
4635                 case BIT(ROCE_TYPE):
4636                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4637                         cur_pos += tuple_size;
4638                         break;
4639                 case BIT(DST_VPORT):
4640                         port_number = hclge_get_port_number(HOST_PORT, 0,
4641                                                             rule->vf_id, 0);
4642                         hnae3_set_field(meta_data,
4643                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4644                                         cur_pos, port_number);
4645                         cur_pos += tuple_size;
4646                         break;
4647                 default:
4648                         break;
4649                 }
4650         }
4651
4652         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4653         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4654         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4655
4656         *key_x = cpu_to_le32(tmp_x << shift_bits);
4657         *key_y = cpu_to_le32(tmp_y << shift_bits);
4658 }
4659
4660 /* A complete key consists of a meta data key and a tuple key.
4661  * The meta data key is stored in the MSB region, the tuple key is stored in
4662  * the LSB region, and the unused bits are filled with 0.
4663  */
4664 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4665                             struct hclge_fd_rule *rule)
4666 {
4667         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4668         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4669         u8 *cur_key_x, *cur_key_y;
4670         unsigned int i;
4671         int ret, tuple_size;
4672         u8 meta_data_region;
4673
4674         memset(key_x, 0, sizeof(key_x));
4675         memset(key_y, 0, sizeof(key_y));
4676         cur_key_x = key_x;
4677         cur_key_y = key_y;
4678
4679         for (i = 0; i < MAX_TUPLE; i++) {
4680                 bool tuple_valid;
4681                 u32 check_tuple;
4682
4683                 tuple_size = tuple_key_info[i].key_length / 8;
4684                 check_tuple = key_cfg->tuple_active & BIT(i);
4685
4686                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4687                                                      cur_key_y, rule);
4688                 if (tuple_valid) {
4689                         cur_key_x += tuple_size;
4690                         cur_key_y += tuple_size;
4691                 }
4692         }
4693
4694         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4695                         MAX_META_DATA_LENGTH / 8;
4696
4697         hclge_fd_convert_meta_data(key_cfg,
4698                                    (__le32 *)(key_x + meta_data_region),
4699                                    (__le32 *)(key_y + meta_data_region),
4700                                    rule);
4701
4702         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4703                                    true);
4704         if (ret) {
4705                 dev_err(&hdev->pdev->dev,
4706                         "fd key_y config fail, loc=%d, ret=%d\n",
4707                         rule->location, ret);
4708                 return ret;
4709         }
4710
4711         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4712                                    true);
4713         if (ret)
4714                 dev_err(&hdev->pdev->dev,
4715                         "fd key_x config fail, loc=%d, ret=%d\n",
4716                         rule->location, ret);
4717         return ret;
4718 }
4719
4720 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4721                                struct hclge_fd_rule *rule)
4722 {
4723         struct hclge_fd_ad_data ad_data;
4724
4725         ad_data.ad_id = rule->location;
4726
4727         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4728                 ad_data.drop_packet = true;
4729                 ad_data.forward_to_direct_queue = false;
4730                 ad_data.queue_id = 0;
4731         } else {
4732                 ad_data.drop_packet = false;
4733                 ad_data.forward_to_direct_queue = true;
4734                 ad_data.queue_id = rule->queue_id;
4735         }
4736
4737         ad_data.use_counter = false;
4738         ad_data.counter_id = 0;
4739
4740         ad_data.use_next_stage = false;
4741         ad_data.next_input_key = 0;
4742
4743         ad_data.write_rule_id_to_bd = true;
4744         ad_data.rule_id = rule->location;
4745
4746         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4747 }
4748
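/* Validate an ethtool flow spec against the flow director capabilities and
 * record the tuples that are not used by the rule in @unused. Returns
 * -EOPNOTSUPP or -EINVAL for unsupported or out-of-range fields.
 */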
4749 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4750                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4751 {
4752         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4753         struct ethtool_usrip4_spec *usr_ip4_spec;
4754         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4755         struct ethtool_usrip6_spec *usr_ip6_spec;
4756         struct ethhdr *ether_spec;
4757
4758         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4759                 return -EINVAL;
4760
4761         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4762                 return -EOPNOTSUPP;
4763
4764         if ((fs->flow_type & FLOW_EXT) &&
4765             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4766                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4767                 return -EOPNOTSUPP;
4768         }
4769
4770         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4771         case SCTP_V4_FLOW:
4772         case TCP_V4_FLOW:
4773         case UDP_V4_FLOW:
4774                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4775                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4776
4777                 if (!tcp_ip4_spec->ip4src)
4778                         *unused |= BIT(INNER_SRC_IP);
4779
4780                 if (!tcp_ip4_spec->ip4dst)
4781                         *unused |= BIT(INNER_DST_IP);
4782
4783                 if (!tcp_ip4_spec->psrc)
4784                         *unused |= BIT(INNER_SRC_PORT);
4785
4786                 if (!tcp_ip4_spec->pdst)
4787                         *unused |= BIT(INNER_DST_PORT);
4788
4789                 if (!tcp_ip4_spec->tos)
4790                         *unused |= BIT(INNER_IP_TOS);
4791
4792                 break;
4793         case IP_USER_FLOW:
4794                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4795                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4796                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4797
4798                 if (!usr_ip4_spec->ip4src)
4799                         *unused |= BIT(INNER_SRC_IP);
4800
4801                 if (!usr_ip4_spec->ip4dst)
4802                         *unused |= BIT(INNER_DST_IP);
4803
4804                 if (!usr_ip4_spec->tos)
4805                         *unused |= BIT(INNER_IP_TOS);
4806
4807                 if (!usr_ip4_spec->proto)
4808                         *unused |= BIT(INNER_IP_PROTO);
4809
4810                 if (usr_ip4_spec->l4_4_bytes)
4811                         return -EOPNOTSUPP;
4812
4813                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4814                         return -EOPNOTSUPP;
4815
4816                 break;
4817         case SCTP_V6_FLOW:
4818         case TCP_V6_FLOW:
4819         case UDP_V6_FLOW:
4820                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4821                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4822                         BIT(INNER_IP_TOS);
4823
4824                 /* check whether the src/dst ip addresses are used */
4825                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4826                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4827                         *unused |= BIT(INNER_SRC_IP);
4828
4829                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4830                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4831                         *unused |= BIT(INNER_DST_IP);
4832
4833                 if (!tcp_ip6_spec->psrc)
4834                         *unused |= BIT(INNER_SRC_PORT);
4835
4836                 if (!tcp_ip6_spec->pdst)
4837                         *unused |= BIT(INNER_DST_PORT);
4838
4839                 if (tcp_ip6_spec->tclass)
4840                         return -EOPNOTSUPP;
4841
4842                 break;
4843         case IPV6_USER_FLOW:
4844                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4845                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4846                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4847                         BIT(INNER_DST_PORT);
4848
4849                 /* check whether the src/dst ip addresses are used */
4850                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4851                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4852                         *unused |= BIT(INNER_SRC_IP);
4853
4854                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4855                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4856                         *unused |= BIT(INNER_DST_IP);
4857
4858                 if (!usr_ip6_spec->l4_proto)
4859                         *unused |= BIT(INNER_IP_PROTO);
4860
4861                 if (usr_ip6_spec->tclass)
4862                         return -EOPNOTSUPP;
4863
4864                 if (usr_ip6_spec->l4_4_bytes)
4865                         return -EOPNOTSUPP;
4866
4867                 break;
4868         case ETHER_FLOW:
4869                 ether_spec = &fs->h_u.ether_spec;
4870                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4871                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4872                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4873
4874                 if (is_zero_ether_addr(ether_spec->h_source))
4875                         *unused |= BIT(INNER_SRC_MAC);
4876
4877                 if (is_zero_ether_addr(ether_spec->h_dest))
4878                         *unused |= BIT(INNER_DST_MAC);
4879
4880                 if (!ether_spec->h_proto)
4881                         *unused |= BIT(INNER_ETH_TYPE);
4882
4883                 break;
4884         default:
4885                 return -EOPNOTSUPP;
4886         }
4887
4888         if ((fs->flow_type & FLOW_EXT)) {
4889                 if (fs->h_ext.vlan_etype)
4890                         return -EOPNOTSUPP;
4891                 if (!fs->h_ext.vlan_tci)
4892                         *unused |= BIT(INNER_VLAN_TAG_FST);
4893
4894                 if (fs->m_ext.vlan_tci) {
4895                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4896                                 return -EINVAL;
4897                 }
4898         } else {
4899                 *unused |= BIT(INNER_VLAN_TAG_FST);
4900         }
4901
4902         if (fs->flow_type & FLOW_MAC_EXT) {
4903                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4904                         return -EOPNOTSUPP;
4905
4906                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4907                         *unused |= BIT(INNER_DST_MAC);
4908                 else
4909                         *unused &= ~(BIT(INNER_DST_MAC));
4910         }
4911
4912         return 0;
4913 }
4914
4915 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4916 {
4917         struct hclge_fd_rule *rule = NULL;
4918         struct hlist_node *node2;
4919
4920         spin_lock_bh(&hdev->fd_rule_lock);
4921         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4922                 if (rule->location >= location)
4923                         break;
4924         }
4925
4926         spin_unlock_bh(&hdev->fd_rule_lock);
4927
4928         return rule && rule->location == location;
4929 }
4930
4931 /* make sure fd_rule_lock is held by the caller */
4932 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4933                                      struct hclge_fd_rule *new_rule,
4934                                      u16 location,
4935                                      bool is_add)
4936 {
4937         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4938         struct hlist_node *node2;
4939
4940         if (is_add && !new_rule)
4941                 return -EINVAL;
4942
4943         hlist_for_each_entry_safe(rule, node2,
4944                                   &hdev->fd_rule_list, rule_node) {
4945                 if (rule->location >= location)
4946                         break;
4947                 parent = rule;
4948         }
4949
4950         if (rule && rule->location == location) {
4951                 hlist_del(&rule->rule_node);
4952                 kfree(rule);
4953                 hdev->hclge_fd_rule_num--;
4954
4955                 if (!is_add) {
4956                         if (!hdev->hclge_fd_rule_num)
4957                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4958                         clear_bit(location, hdev->fd_bmap);
4959
4960                         return 0;
4961                 }
4962         } else if (!is_add) {
4963                 dev_err(&hdev->pdev->dev,
4964                         "delete fail, rule %d does not exist\n",
4965                         location);
4966                 return -EINVAL;
4967         }
4968
4969         INIT_HLIST_NODE(&new_rule->rule_node);
4970
4971         if (parent)
4972                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4973         else
4974                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4975
4976         set_bit(location, hdev->fd_bmap);
4977         hdev->hclge_fd_rule_num++;
4978         hdev->fd_active_type = new_rule->rule_type;
4979
4980         return 0;
4981 }
4982
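/* Translate an ethtool flow spec into the driver's tuple/tuple-mask
 * representation, converting fields to host byte order and filling in the
 * ether type and ip proto implied by the flow type.
 */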
4983 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4984                               struct ethtool_rx_flow_spec *fs,
4985                               struct hclge_fd_rule *rule)
4986 {
4987         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4988
4989         switch (flow_type) {
4990         case SCTP_V4_FLOW:
4991         case TCP_V4_FLOW:
4992         case UDP_V4_FLOW:
4993                 rule->tuples.src_ip[IPV4_INDEX] =
4994                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4995                 rule->tuples_mask.src_ip[IPV4_INDEX] =
4996                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4997
4998                 rule->tuples.dst_ip[IPV4_INDEX] =
4999                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5000                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5001                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5002
5003                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5004                 rule->tuples_mask.src_port =
5005                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5006
5007                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5008                 rule->tuples_mask.dst_port =
5009                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5010
5011                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5012                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5013
5014                 rule->tuples.ether_proto = ETH_P_IP;
5015                 rule->tuples_mask.ether_proto = 0xFFFF;
5016
5017                 break;
5018         case IP_USER_FLOW:
5019                 rule->tuples.src_ip[IPV4_INDEX] =
5020                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5021                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5022                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5023
5024                 rule->tuples.dst_ip[IPV4_INDEX] =
5025                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5026                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5027                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5028
5029                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5030                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5031
5032                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5033                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5034
5035                 rule->tuples.ether_proto = ETH_P_IP;
5036                 rule->tuples_mask.ether_proto = 0xFFFF;
5037
5038                 break;
5039         case SCTP_V6_FLOW:
5040         case TCP_V6_FLOW:
5041         case UDP_V6_FLOW:
5042                 be32_to_cpu_array(rule->tuples.src_ip,
5043                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5044                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5045                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5046
5047                 be32_to_cpu_array(rule->tuples.dst_ip,
5048                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5049                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5050                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5051
5052                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5053                 rule->tuples_mask.src_port =
5054                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5055
5056                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5057                 rule->tuples_mask.dst_port =
5058                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5059
5060                 rule->tuples.ether_proto = ETH_P_IPV6;
5061                 rule->tuples_mask.ether_proto = 0xFFFF;
5062
5063                 break;
5064         case IPV6_USER_FLOW:
5065                 be32_to_cpu_array(rule->tuples.src_ip,
5066                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5067                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5068                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5069
5070                 be32_to_cpu_array(rule->tuples.dst_ip,
5071                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5072                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5073                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5074
5075                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5076                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5077
5078                 rule->tuples.ether_proto = ETH_P_IPV6;
5079                 rule->tuples_mask.ether_proto = 0xFFFF;
5080
5081                 break;
5082         case ETHER_FLOW:
5083                 ether_addr_copy(rule->tuples.src_mac,
5084                                 fs->h_u.ether_spec.h_source);
5085                 ether_addr_copy(rule->tuples_mask.src_mac,
5086                                 fs->m_u.ether_spec.h_source);
5087
5088                 ether_addr_copy(rule->tuples.dst_mac,
5089                                 fs->h_u.ether_spec.h_dest);
5090                 ether_addr_copy(rule->tuples_mask.dst_mac,
5091                                 fs->m_u.ether_spec.h_dest);
5092
5093                 rule->tuples.ether_proto =
5094                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5095                 rule->tuples_mask.ether_proto =
5096                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5097
5098                 break;
5099         default:
5100                 return -EOPNOTSUPP;
5101         }
5102
5103         switch (flow_type) {
5104         case SCTP_V4_FLOW:
5105         case SCTP_V6_FLOW:
5106                 rule->tuples.ip_proto = IPPROTO_SCTP;
5107                 rule->tuples_mask.ip_proto = 0xFF;
5108                 break;
5109         case TCP_V4_FLOW:
5110         case TCP_V6_FLOW:
5111                 rule->tuples.ip_proto = IPPROTO_TCP;
5112                 rule->tuples_mask.ip_proto = 0xFF;
5113                 break;
5114         case UDP_V4_FLOW:
5115         case UDP_V6_FLOW:
5116                 rule->tuples.ip_proto = IPPROTO_UDP;
5117                 rule->tuples_mask.ip_proto = 0xFF;
5118                 break;
5119         default:
5120                 break;
5121         }
5122
5123         if ((fs->flow_type & FLOW_EXT)) {
5124                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5125                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5126         }
5127
5128         if (fs->flow_type & FLOW_MAC_EXT) {
5129                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5130                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5131         }
5132
5133         return 0;
5134 }
5135
5136 /* make sure fd_rule_lock is held by the caller */
5137 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5138                                 struct hclge_fd_rule *rule)
5139 {
5140         int ret;
5141
5142         if (!rule) {
5143                 dev_err(&hdev->pdev->dev,
5144                         "The flow director rule is NULL\n");
5145                 return -EINVAL;
5146         }
5147
5148         /* it never fails here, so there is no need to check the return value */
5149         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5150
5151         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5152         if (ret)
5153                 goto clear_rule;
5154
5155         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5156         if (ret)
5157                 goto clear_rule;
5158
5159         return 0;
5160
5161 clear_rule:
5162         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5163         return ret;
5164 }
5165
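/* Add a flow director rule configured via ethtool: check the spec, resolve
 * the destination vport and queue from the ring cookie, then build the rule
 * and program it to hardware under fd_rule_lock.
 */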
5166 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5167                               struct ethtool_rxnfc *cmd)
5168 {
5169         struct hclge_vport *vport = hclge_get_vport(handle);
5170         struct hclge_dev *hdev = vport->back;
5171         u16 dst_vport_id = 0, q_index = 0;
5172         struct ethtool_rx_flow_spec *fs;
5173         struct hclge_fd_rule *rule;
5174         u32 unused = 0;
5175         u8 action;
5176         int ret;
5177
5178         if (!hnae3_dev_fd_supported(hdev))
5179                 return -EOPNOTSUPP;
5180
5181         if (!hdev->fd_en) {
5182                 dev_warn(&hdev->pdev->dev,
5183                          "Please enable flow director first\n");
5184                 return -EOPNOTSUPP;
5185         }
5186
5187         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5188
5189         ret = hclge_fd_check_spec(hdev, fs, &unused);
5190         if (ret) {
5191                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5192                 return ret;
5193         }
5194
5195         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5196                 action = HCLGE_FD_ACTION_DROP_PACKET;
5197         } else {
5198                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5199                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5200                 u16 tqps;
5201
5202                 if (vf > hdev->num_req_vfs) {
5203                         dev_err(&hdev->pdev->dev,
5204                                 "Error: vf id (%d) > max vf num (%d)\n",
5205                                 vf, hdev->num_req_vfs);
5206                         return -EINVAL;
5207                 }
5208
5209                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5210                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5211
5212                 if (ring >= tqps) {
5213                         dev_err(&hdev->pdev->dev,
5214                                 "Error: queue id (%d) > max tqp num (%d)\n",
5215                                 ring, tqps - 1);
5216                         return -EINVAL;
5217                 }
5218
5219                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5220                 q_index = ring;
5221         }
5222
5223         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5224         if (!rule)
5225                 return -ENOMEM;
5226
5227         ret = hclge_fd_get_tuple(hdev, fs, rule);
5228         if (ret) {
5229                 kfree(rule);
5230                 return ret;
5231         }
5232
5233         rule->flow_type = fs->flow_type;
5234
5235         rule->location = fs->location;
5236         rule->unused_tuple = unused;
5237         rule->vf_id = dst_vport_id;
5238         rule->queue_id = q_index;
5239         rule->action = action;
5240         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5241
5242         /* to avoid rule conflict, when the user configures a rule via
5243          * ethtool, we need to clear all arfs rules first
5244          */
5245         hclge_clear_arfs_rules(handle);
5246
5247         spin_lock_bh(&hdev->fd_rule_lock);
5248         ret = hclge_fd_config_rule(hdev, rule);
5249
5250         spin_unlock_bh(&hdev->fd_rule_lock);
5251
5252         return ret;
5253 }
5254
5255 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5256                               struct ethtool_rxnfc *cmd)
5257 {
5258         struct hclge_vport *vport = hclge_get_vport(handle);
5259         struct hclge_dev *hdev = vport->back;
5260         struct ethtool_rx_flow_spec *fs;
5261         int ret;
5262
5263         if (!hnae3_dev_fd_supported(hdev))
5264                 return -EOPNOTSUPP;
5265
5266         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5267
5268         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5269                 return -EINVAL;
5270
5271         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5272                 dev_err(&hdev->pdev->dev,
5273                         "Delete fail, rule %d does not exist\n", fs->location);
5274                 return -ENOENT;
5275         }
5276
5277         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5278                                    NULL, false);
5279         if (ret)
5280                 return ret;
5281
5282         spin_lock_bh(&hdev->fd_rule_lock);
5283         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5284
5285         spin_unlock_bh(&hdev->fd_rule_lock);
5286
5287         return ret;
5288 }
5289
5290 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5291                                      bool clear_list)
5292 {
5293         struct hclge_vport *vport = hclge_get_vport(handle);
5294         struct hclge_dev *hdev = vport->back;
5295         struct hclge_fd_rule *rule;
5296         struct hlist_node *node;
5297         u16 location;
5298
5299         if (!hnae3_dev_fd_supported(hdev))
5300                 return;
5301
5302         spin_lock_bh(&hdev->fd_rule_lock);
5303         for_each_set_bit(location, hdev->fd_bmap,
5304                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5305                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5306                                      NULL, false);
5307
5308         if (clear_list) {
5309                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5310                                           rule_node) {
5311                         hlist_del(&rule->rule_node);
5312                         kfree(rule);
5313                 }
5314                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5315                 hdev->hclge_fd_rule_num = 0;
5316                 bitmap_zero(hdev->fd_bmap,
5317                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5318         }
5319
5320         spin_unlock_bh(&hdev->fd_rule_lock);
5321 }
5322
5323 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5324 {
5325         struct hclge_vport *vport = hclge_get_vport(handle);
5326         struct hclge_dev *hdev = vport->back;
5327         struct hclge_fd_rule *rule;
5328         struct hlist_node *node;
5329         int ret;
5330
5331         /* Return ok here, because reset error handling will check this
5332          * return value. If error is returned here, the reset process will
5333          * fail.
5334          */
5335         if (!hnae3_dev_fd_supported(hdev))
5336                 return 0;
5337
5338         /* if fd is disabled, the rules should not be restored during reset */
5339         if (!hdev->fd_en)
5340                 return 0;
5341
5342         spin_lock_bh(&hdev->fd_rule_lock);
5343         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5344                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5345                 if (!ret)
5346                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5347
5348                 if (ret) {
5349                         dev_warn(&hdev->pdev->dev,
5350                                  "Restore rule %d failed, remove it\n",
5351                                  rule->location);
5352                         clear_bit(rule->location, hdev->fd_bmap);
5353                         hlist_del(&rule->rule_node);
5354                         kfree(rule);
5355                         hdev->hclge_fd_rule_num--;
5356                 }
5357         }
5358
5359         if (hdev->hclge_fd_rule_num)
5360                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5361
5362         spin_unlock_bh(&hdev->fd_rule_lock);
5363
5364         return 0;
5365 }
5366
5367 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5368                                  struct ethtool_rxnfc *cmd)
5369 {
5370         struct hclge_vport *vport = hclge_get_vport(handle);
5371         struct hclge_dev *hdev = vport->back;
5372
5373         if (!hnae3_dev_fd_supported(hdev))
5374                 return -EOPNOTSUPP;
5375
5376         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5377         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5378
5379         return 0;
5380 }
5381
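/* Look up the rule at fs->location and convert the stored tuples and masks
 * back into the ethtool flow spec format.
 */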
5382 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5383                                   struct ethtool_rxnfc *cmd)
5384 {
5385         struct hclge_vport *vport = hclge_get_vport(handle);
5386         struct hclge_fd_rule *rule = NULL;
5387         struct hclge_dev *hdev = vport->back;
5388         struct ethtool_rx_flow_spec *fs;
5389         struct hlist_node *node2;
5390
5391         if (!hnae3_dev_fd_supported(hdev))
5392                 return -EOPNOTSUPP;
5393
5394         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5395
5396         spin_lock_bh(&hdev->fd_rule_lock);
5397
5398         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5399                 if (rule->location >= fs->location)
5400                         break;
5401         }
5402
5403         if (!rule || fs->location != rule->location) {
5404                 spin_unlock_bh(&hdev->fd_rule_lock);
5405
5406                 return -ENOENT;
5407         }
5408
5409         fs->flow_type = rule->flow_type;
5410         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5411         case SCTP_V4_FLOW:
5412         case TCP_V4_FLOW:
5413         case UDP_V4_FLOW:
5414                 fs->h_u.tcp_ip4_spec.ip4src =
5415                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5416                 fs->m_u.tcp_ip4_spec.ip4src =
5417                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5418                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5419
5420                 fs->h_u.tcp_ip4_spec.ip4dst =
5421                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5422                 fs->m_u.tcp_ip4_spec.ip4dst =
5423                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5424                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5425
5426                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5427                 fs->m_u.tcp_ip4_spec.psrc =
5428                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5429                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5430
5431                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5432                 fs->m_u.tcp_ip4_spec.pdst =
5433                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5434                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5435
5436                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5437                 fs->m_u.tcp_ip4_spec.tos =
5438                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5439                                 0 : rule->tuples_mask.ip_tos;
5440
5441                 break;
5442         case IP_USER_FLOW:
5443                 fs->h_u.usr_ip4_spec.ip4src =
5444                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5445                 fs->m_u.tcp_ip4_spec.ip4src =
5446                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5447                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5448
5449                 fs->h_u.usr_ip4_spec.ip4dst =
5450                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5451                 fs->m_u.usr_ip4_spec.ip4dst =
5452                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5453                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5454
5455                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5456                 fs->m_u.usr_ip4_spec.tos =
5457                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5458                                 0 : rule->tuples_mask.ip_tos;
5459
5460                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5461                 fs->m_u.usr_ip4_spec.proto =
5462                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5463                                 0 : rule->tuples_mask.ip_proto;
5464
5465                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5466
5467                 break;
5468         case SCTP_V6_FLOW:
5469         case TCP_V6_FLOW:
5470         case UDP_V6_FLOW:
5471                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5472                                   rule->tuples.src_ip, IPV6_SIZE);
5473                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5474                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5475                                sizeof(int) * IPV6_SIZE);
5476                 else
5477                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5478                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5479
5480                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5481                                   rule->tuples.dst_ip, IPV6_SIZE);
5482                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5483                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5484                                sizeof(int) * IPV6_SIZE);
5485                 else
5486                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5487                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5488
5489                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5490                 fs->m_u.tcp_ip6_spec.psrc =
5491                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5492                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5493
5494                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5495                 fs->m_u.tcp_ip6_spec.pdst =
5496                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5497                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5498
5499                 break;
5500         case IPV6_USER_FLOW:
5501                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5502                                   rule->tuples.src_ip, IPV6_SIZE);
5503                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5504                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5505                                sizeof(int) * IPV6_SIZE);
5506                 else
5507                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5508                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5509
5510                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5511                                   rule->tuples.dst_ip, IPV6_SIZE);
5512                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5513                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5514                                sizeof(int) * IPV6_SIZE);
5515                 else
5516                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5517                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5518
5519                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5520                 fs->m_u.usr_ip6_spec.l4_proto =
5521                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5522                                 0 : rule->tuples_mask.ip_proto;
5523
5524                 break;
5525         case ETHER_FLOW:
5526                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5527                                 rule->tuples.src_mac);
5528                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5529                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5530                 else
5531                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5532                                         rule->tuples_mask.src_mac);
5533
5534                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5535                                 rule->tuples.dst_mac);
5536                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5537                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5538                 else
5539                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5540                                         rule->tuples_mask.dst_mac);
5541
5542                 fs->h_u.ether_spec.h_proto =
5543                                 cpu_to_be16(rule->tuples.ether_proto);
5544                 fs->m_u.ether_spec.h_proto =
5545                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5546                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5547
5548                 break;
5549         default:
5550                 spin_unlock_bh(&hdev->fd_rule_lock);
5551                 return -EOPNOTSUPP;
5552         }
5553
5554         if (fs->flow_type & FLOW_EXT) {
5555                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5556                 fs->m_ext.vlan_tci =
5557                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5558                                 cpu_to_be16(VLAN_VID_MASK) :
5559                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5560         }
5561
5562         if (fs->flow_type & FLOW_MAC_EXT) {
5563                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5564                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5565                         eth_zero_addr(fs->m_ext.h_dest);
5566                 else
5567                         ether_addr_copy(fs->m_ext.h_dest,
5568                                         rule->tuples_mask.dst_mac);
5569         }
5570
5571         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5572                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5573         } else {
5574                 u64 vf_id;
5575
5576                 fs->ring_cookie = rule->queue_id;
5577                 vf_id = rule->vf_id;
5578                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5579                 fs->ring_cookie |= vf_id;
5580         }
5581
5582         spin_unlock_bh(&hdev->fd_rule_lock);
5583
5584         return 0;
5585 }
5586
5587 static int hclge_get_all_rules(struct hnae3_handle *handle,
5588                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5589 {
5590         struct hclge_vport *vport = hclge_get_vport(handle);
5591         struct hclge_dev *hdev = vport->back;
5592         struct hclge_fd_rule *rule;
5593         struct hlist_node *node2;
5594         int cnt = 0;
5595
5596         if (!hnae3_dev_fd_supported(hdev))
5597                 return -EOPNOTSUPP;
5598
5599         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5600
5601         spin_lock_bh(&hdev->fd_rule_lock);
5602         hlist_for_each_entry_safe(rule, node2,
5603                                   &hdev->fd_rule_list, rule_node) {
5604                 if (cnt == cmd->rule_cnt) {
5605                         spin_unlock_bh(&hdev->fd_rule_lock);
5606                         return -EMSGSIZE;
5607                 }
5608
5609                 rule_locs[cnt] = rule->location;
5610                 cnt++;
5611         }
5612
5613         spin_unlock_bh(&hdev->fd_rule_lock);
5614
5615         cmd->rule_cnt = cnt;
5616
5617         return 0;
5618 }
5619
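/* extract the flow director tuples (ether type, ip proto, L4 dest port and
 * src/dst addresses) from the flow keys parsed by the stack; for IPv4 only
 * the last word of the src_ip/dst_ip arrays is filled
 */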
5620 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5621                                      struct hclge_fd_rule_tuples *tuples)
5622 {
5623         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5624         tuples->ip_proto = fkeys->basic.ip_proto;
5625         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5626
5627         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5628                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5629                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5630         } else {
5631                 memcpy(tuples->src_ip,
5632                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5633                        sizeof(tuples->src_ip));
5634                 memcpy(tuples->dst_ip,
5635                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5636                        sizeof(tuples->dst_ip));
5637         }
5638 }
5639
5640 /* traverse all rules, check whether an existing rule has the same tuples */
5641 static struct hclge_fd_rule *
5642 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5643                           const struct hclge_fd_rule_tuples *tuples)
5644 {
5645         struct hclge_fd_rule *rule = NULL;
5646         struct hlist_node *node;
5647
5648         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5649                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5650                         return rule;
5651         }
5652
5653         return NULL;
5654 }
5655
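/* turn the extracted tuples into an aRFS rule: mac, vlan, tos and source
 * port are marked unused, the flow type is derived from the ether type and
 * ip proto, and the tuple mask is set to all ones for an exact match
 */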
5656 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5657                                      struct hclge_fd_rule *rule)
5658 {
5659         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5660                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5661                              BIT(INNER_SRC_PORT);
5662         rule->action = 0;
5663         rule->vf_id = 0;
5664         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5665         if (tuples->ether_proto == ETH_P_IP) {
5666                 if (tuples->ip_proto == IPPROTO_TCP)
5667                         rule->flow_type = TCP_V4_FLOW;
5668                 else
5669                         rule->flow_type = UDP_V4_FLOW;
5670         } else {
5671                 if (tuples->ip_proto == IPPROTO_TCP)
5672                         rule->flow_type = TCP_V6_FLOW;
5673                 else
5674                         rule->flow_type = UDP_V6_FLOW;
5675         }
5676         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5677         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5678 }
5679
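/* aRFS entry point: refuses to run while user-added (EP) rules are active,
 * otherwise reuses a rule with matching tuples, allocates a new TCAM
 * location for an unknown flow, or redirects an existing rule to the new
 * queue; returns the rule location on success
 */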
5680 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5681                                       u16 flow_id, struct flow_keys *fkeys)
5682 {
5683         struct hclge_vport *vport = hclge_get_vport(handle);
5684         struct hclge_fd_rule_tuples new_tuples;
5685         struct hclge_dev *hdev = vport->back;
5686         struct hclge_fd_rule *rule;
5687         u16 tmp_queue_id;
5688         u16 bit_id;
5689         int ret;
5690
5691         if (!hnae3_dev_fd_supported(hdev))
5692                 return -EOPNOTSUPP;
5693
5694         memset(&new_tuples, 0, sizeof(new_tuples));
5695         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5696
5697         spin_lock_bh(&hdev->fd_rule_lock);
5698
5699         /* when there is already an fd rule added by the user,
5700          * arfs should not work
5701          */
5702         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5703                 spin_unlock_bh(&hdev->fd_rule_lock);
5704
5705                 return -EOPNOTSUPP;
5706         }
5707
5708         /* check whether a flow director filter already exists for this flow;
5709          * if not, create a new filter for it;
5710          * if a filter exists with a different queue id, modify the filter;
5711          * if a filter exists with the same queue id, do nothing
5712          */
5713         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5714         if (!rule) {
5715                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5716                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5717                         spin_unlock_bh(&hdev->fd_rule_lock);
5718
5719                         return -ENOSPC;
5720                 }
5721
5722                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5723                 if (!rule) {
5724                         spin_unlock_bh(&hdev->fd_rule_lock);
5725
5726                         return -ENOMEM;
5727                 }
5728
5729                 set_bit(bit_id, hdev->fd_bmap);
5730                 rule->location = bit_id;
5731                 rule->flow_id = flow_id;
5732                 rule->queue_id = queue_id;
5733                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5734                 ret = hclge_fd_config_rule(hdev, rule);
5735
5736                 spin_unlock_bh(&hdev->fd_rule_lock);
5737
5738                 if (ret)
5739                         return ret;
5740
5741                 return rule->location;
5742         }
5743
5744         spin_unlock_bh(&hdev->fd_rule_lock);
5745
5746         if (rule->queue_id == queue_id)
5747                 return rule->location;
5748
5749         tmp_queue_id = rule->queue_id;
5750         rule->queue_id = queue_id;
5751         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5752         if (ret) {
5753                 rule->queue_id = tmp_queue_id;
5754                 return ret;
5755         }
5756
5757         return rule->location;
5758 }
5759
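/* age out aRFS rules: under the rule lock, move entries the stack no longer
 * needs onto a local list, then delete them from the TCAM and free them
 * outside the lock
 */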
5760 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5761 {
5762 #ifdef CONFIG_RFS_ACCEL
5763         struct hnae3_handle *handle = &hdev->vport[0].nic;
5764         struct hclge_fd_rule *rule;
5765         struct hlist_node *node;
5766         HLIST_HEAD(del_list);
5767
5768         spin_lock_bh(&hdev->fd_rule_lock);
5769         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5770                 spin_unlock_bh(&hdev->fd_rule_lock);
5771                 return;
5772         }
5773         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5774                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5775                                         rule->flow_id, rule->location)) {
5776                         hlist_del_init(&rule->rule_node);
5777                         hlist_add_head(&rule->rule_node, &del_list);
5778                         hdev->hclge_fd_rule_num--;
5779                         clear_bit(rule->location, hdev->fd_bmap);
5780                 }
5781         }
5782         spin_unlock_bh(&hdev->fd_rule_lock);
5783
5784         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5785                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5786                                      rule->location, NULL, false);
5787                 kfree(rule);
5788         }
5789 #endif
5790 }
5791
5792 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5793 {
5794 #ifdef CONFIG_RFS_ACCEL
5795         struct hclge_vport *vport = hclge_get_vport(handle);
5796         struct hclge_dev *hdev = vport->back;
5797
5798         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5799                 hclge_del_all_fd_entries(handle, true);
5800 #endif
5801 }
5802
5803 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5804 {
5805         struct hclge_vport *vport = hclge_get_vport(handle);
5806         struct hclge_dev *hdev = vport->back;
5807
5808         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5809                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5810 }
5811
5812 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5813 {
5814         struct hclge_vport *vport = hclge_get_vport(handle);
5815         struct hclge_dev *hdev = vport->back;
5816
5817         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5818 }
5819
5820 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5821 {
5822         struct hclge_vport *vport = hclge_get_vport(handle);
5823         struct hclge_dev *hdev = vport->back;
5824
5825         return hdev->rst_stats.hw_reset_done_cnt;
5826 }
5827
5828 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5829 {
5830         struct hclge_vport *vport = hclge_get_vport(handle);
5831         struct hclge_dev *hdev = vport->back;
5832         bool clear;
5833
5834         hdev->fd_en = enable;
5835         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5836         if (!enable)
5837                 hclge_del_all_fd_entries(handle, clear);
5838         else
5839                 hclge_restore_fd_entries(handle);
5840 }
5841
5842 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5843 {
5844         struct hclge_desc desc;
5845         struct hclge_config_mac_mode_cmd *req =
5846                 (struct hclge_config_mac_mode_cmd *)desc.data;
5847         u32 loop_en = 0;
5848         int ret;
5849
5850         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5851         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5852         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5853         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5854         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5855         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5856         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5857         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5858         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5859         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5860         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5861         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5862         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5863         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5864         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5865         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5866
5867         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5868         if (ret)
5869                 dev_err(&hdev->pdev->dev,
5870                         "mac enable fail, ret =%d.\n", ret);
5871 }
5872
5873 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5874 {
5875         struct hclge_config_mac_mode_cmd *req;
5876         struct hclge_desc desc;
5877         u32 loop_en;
5878         int ret;
5879
5880         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5881         /* 1 Read out the MAC mode config first */
5882         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5883         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5884         if (ret) {
5885                 dev_err(&hdev->pdev->dev,
5886                         "mac loopback get fail, ret =%d.\n", ret);
5887                 return ret;
5888         }
5889
5890         /* 2 Then setup the loopback flag */
5891         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5892         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5893         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5894         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5895
5896         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5897
5898         /* 3 Config mac work mode with loopback flag
5899          * and its original configuration parameters
5900          */
5901         hclge_cmd_reuse_desc(&desc, false);
5902         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5903         if (ret)
5904                 dev_err(&hdev->pdev->dev,
5905                         "mac loopback set fail, ret =%d.\n", ret);
5906         return ret;
5907 }
5908
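/* enable or disable serdes loopback via firmware, then poll first for the
 * firmware completion/success flags and afterwards for the expected MAC
 * link state, returning -EBUSY once the retry limits are exhausted
 */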
5909 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5910                                      enum hnae3_loop loop_mode)
5911 {
5912 #define HCLGE_SERDES_RETRY_MS   10
5913 #define HCLGE_SERDES_RETRY_NUM  100
5914
5915 #define HCLGE_MAC_LINK_STATUS_MS   10
5916 #define HCLGE_MAC_LINK_STATUS_NUM  100
5917 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5918 #define HCLGE_MAC_LINK_STATUS_UP   1
5919
5920         struct hclge_serdes_lb_cmd *req;
5921         struct hclge_desc desc;
5922         int mac_link_ret = 0;
5923         int ret, i = 0;
5924         u8 loop_mode_b;
5925
5926         req = (struct hclge_serdes_lb_cmd *)desc.data;
5927         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5928
5929         switch (loop_mode) {
5930         case HNAE3_LOOP_SERIAL_SERDES:
5931                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5932                 break;
5933         case HNAE3_LOOP_PARALLEL_SERDES:
5934                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5935                 break;
5936         default:
5937                 dev_err(&hdev->pdev->dev,
5938                         "unsupported serdes loopback mode %d\n", loop_mode);
5939                 return -ENOTSUPP;
5940         }
5941
5942         if (en) {
5943                 req->enable = loop_mode_b;
5944                 req->mask = loop_mode_b;
5945                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5946         } else {
5947                 req->mask = loop_mode_b;
5948                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5949         }
5950
5951         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5952         if (ret) {
5953                 dev_err(&hdev->pdev->dev,
5954                         "serdes loopback set fail, ret = %d\n", ret);
5955                 return ret;
5956         }
5957
5958         do {
5959                 msleep(HCLGE_SERDES_RETRY_MS);
5960                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5961                                            true);
5962                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5963                 if (ret) {
5964                         dev_err(&hdev->pdev->dev,
5965                                 "serdes loopback get fail, ret = %d\n", ret);
5966                         return ret;
5967                 }
5968         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5969                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5970
5971         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5972                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5973                 return -EBUSY;
5974         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5975                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5976                 return -EIO;
5977         }
5978
5979         hclge_cfg_mac_mode(hdev, en);
5980
5981         i = 0;
5982         do {
5983                 /* serdes internal loopback, independent of the network cable. */
5984                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5985                 ret = hclge_get_mac_link_status(hdev);
5986                 if (ret == mac_link_ret)
5987                         return 0;
5988         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5989
5990         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5991
5992         return -EBUSY;
5993 }
5994
5995 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
5996                             int stream_id, bool enable)
5997 {
5998         struct hclge_desc desc;
5999         struct hclge_cfg_com_tqp_queue_cmd *req =
6000                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6001         int ret;
6002
6003         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6004         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6005         req->stream_id = cpu_to_le16(stream_id);
6006         if (enable)
6007                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6008
6009         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6010         if (ret)
6011                 dev_err(&hdev->pdev->dev,
6012                         "Tqp enable fail, status =%d.\n", ret);
6013         return ret;
6014 }
6015
6016 static int hclge_set_loopback(struct hnae3_handle *handle,
6017                               enum hnae3_loop loop_mode, bool en)
6018 {
6019         struct hclge_vport *vport = hclge_get_vport(handle);
6020         struct hnae3_knic_private_info *kinfo;
6021         struct hclge_dev *hdev = vport->back;
6022         int i, ret;
6023
6024         switch (loop_mode) {
6025         case HNAE3_LOOP_APP:
6026                 ret = hclge_set_app_loopback(hdev, en);
6027                 break;
6028         case HNAE3_LOOP_SERIAL_SERDES:
6029         case HNAE3_LOOP_PARALLEL_SERDES:
6030                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6031                 break;
6032         default:
6033                 ret = -ENOTSUPP;
6034                 dev_err(&hdev->pdev->dev,
6035                         "loop_mode %d is not supported\n", loop_mode);
6036                 break;
6037         }
6038
6039         if (ret)
6040                 return ret;
6041
6042         kinfo = &vport->nic.kinfo;
6043         for (i = 0; i < kinfo->num_tqps; i++) {
6044                 ret = hclge_tqp_enable(hdev, i, 0, en);
6045                 if (ret)
6046                         return ret;
6047         }
6048
6049         return 0;
6050 }
6051
6052 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6053 {
6054         struct hclge_vport *vport = hclge_get_vport(handle);
6055         struct hnae3_knic_private_info *kinfo;
6056         struct hnae3_queue *queue;
6057         struct hclge_tqp *tqp;
6058         int i;
6059
6060         kinfo = &vport->nic.kinfo;
6061         for (i = 0; i < kinfo->num_tqps; i++) {
6062                 queue = handle->kinfo.tqp[i];
6063                 tqp = container_of(queue, struct hclge_tqp, q);
6064                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6065         }
6066 }
6067
6068 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6069 {
6070         struct hclge_vport *vport = hclge_get_vport(handle);
6071         struct hclge_dev *hdev = vport->back;
6072
6073         if (enable) {
6074                 mod_timer(&hdev->service_timer, jiffies + HZ);
6075         } else {
6076                 del_timer_sync(&hdev->service_timer);
6077                 cancel_work_sync(&hdev->service_task);
6078                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6079         }
6080 }
6081
6082 static int hclge_ae_start(struct hnae3_handle *handle)
6083 {
6084         struct hclge_vport *vport = hclge_get_vport(handle);
6085         struct hclge_dev *hdev = vport->back;
6086
6087         /* mac enable */
6088         hclge_cfg_mac_mode(hdev, true);
6089         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6090         hdev->hw.mac.link = 0;
6091
6092         /* reset tqp stats */
6093         hclge_reset_tqp_stats(handle);
6094
6095         hclge_mac_start_phy(hdev);
6096
6097         return 0;
6098 }
6099
6100 static void hclge_ae_stop(struct hnae3_handle *handle)
6101 {
6102         struct hclge_vport *vport = hclge_get_vport(handle);
6103         struct hclge_dev *hdev = vport->back;
6104         int i;
6105
6106         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6107
6108         hclge_clear_arfs_rules(handle);
6109
6110         /* If it is not PF reset, the firmware will disable the MAC,
6111          * so it only needs to stop the phy here.
6112          */
6113         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6114             hdev->reset_type != HNAE3_FUNC_RESET) {
6115                 hclge_mac_stop_phy(hdev);
6116                 return;
6117         }
6118
6119         for (i = 0; i < handle->kinfo.num_tqps; i++)
6120                 hclge_reset_tqp(handle, i);
6121
6122         /* Mac disable */
6123         hclge_cfg_mac_mode(hdev, false);
6124
6125         hclge_mac_stop_phy(hdev);
6126
6127         /* reset tqp stats */
6128         hclge_reset_tqp_stats(handle);
6129         hclge_update_link_status(hdev);
6130 }
6131
6132 int hclge_vport_start(struct hclge_vport *vport)
6133 {
6134         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6135         vport->last_active_jiffies = jiffies;
6136         return 0;
6137 }
6138
6139 void hclge_vport_stop(struct hclge_vport *vport)
6140 {
6141         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6142 }
6143
6144 static int hclge_client_start(struct hnae3_handle *handle)
6145 {
6146         struct hclge_vport *vport = hclge_get_vport(handle);
6147
6148         return hclge_vport_start(vport);
6149 }
6150
6151 static void hclge_client_stop(struct hnae3_handle *handle)
6152 {
6153         struct hclge_vport *vport = hclge_get_vport(handle);
6154
6155         hclge_vport_stop(vport);
6156 }
6157
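/* translate a MAC/VLAN table command response into an errno according to
 * the operation: 0 on success, -ENOSPC on table overflow, -ENOENT when a
 * remove or lookup misses, -EIO or -EINVAL otherwise
 */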
6158 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6159                                          u16 cmdq_resp, u8  resp_code,
6160                                          enum hclge_mac_vlan_tbl_opcode op)
6161 {
6162         struct hclge_dev *hdev = vport->back;
6163         int return_status = -EIO;
6164
6165         if (cmdq_resp) {
6166                 dev_err(&hdev->pdev->dev,
6167                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6168                         cmdq_resp);
6169                 return -EIO;
6170         }
6171
6172         if (op == HCLGE_MAC_VLAN_ADD) {
6173                 if ((!resp_code) || (resp_code == 1)) {
6174                         return_status = 0;
6175                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6176                         return_status = -ENOSPC;
6177                         dev_err(&hdev->pdev->dev,
6178                                 "add mac addr failed for uc_overflow.\n");
6179                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6180                         return_status = -ENOSPC;
6181                         dev_err(&hdev->pdev->dev,
6182                                 "add mac addr failed for mc_overflow.\n");
6183                 } else {
6184                         dev_err(&hdev->pdev->dev,
6185                                 "add mac addr failed for undefined, code=%d.\n",
6186                                 resp_code);
6187                 }
6188         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6189                 if (!resp_code) {
6190                         return_status = 0;
6191                 } else if (resp_code == 1) {
6192                         return_status = -ENOENT;
6193                         dev_dbg(&hdev->pdev->dev,
6194                                 "remove mac addr failed for miss.\n");
6195                 } else {
6196                         dev_err(&hdev->pdev->dev,
6197                                 "remove mac addr failed for undefined, code=%d.\n",
6198                                 resp_code);
6199                 }
6200         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6201                 if (!resp_code) {
6202                         return_status = 0;
6203                 } else if (resp_code == 1) {
6204                         return_status = -ENOENT;
6205                         dev_dbg(&hdev->pdev->dev,
6206                                 "lookup mac addr failed for miss.\n");
6207                 } else {
6208                         dev_err(&hdev->pdev->dev,
6209                                 "lookup mac addr failed for undefined, code=%d.\n",
6210                                 resp_code);
6211                 }
6212         } else {
6213                 return_status = -EINVAL;
6214                 dev_err(&hdev->pdev->dev,
6215                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6216                         op);
6217         }
6218
6219         return return_status;
6220 }
6221
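/* set or clear the per-function bit for vfid in the multicast MAC/VLAN
 * descriptors: functions 0-191 are tracked in desc[1], functions 192-255
 * in desc[2]; e.g. vfid 200 maps to bit 8 of desc[2].data[0]
 */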
6222 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6223 {
6224 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6225
6226         int word_num;
6227         int bit_num;
6228
6229         if (vfid > 255 || vfid < 0)
6230                 return -EIO;
6231
6232         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6233                 word_num = vfid / 32;
6234                 bit_num  = vfid % 32;
6235                 if (clr)
6236                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6237                 else
6238                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6239         } else {
6240                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6241                 bit_num  = vfid % 32;
6242                 if (clr)
6243                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6244                 else
6245                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6246         }
6247
6248         return 0;
6249 }
6250
6251 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6252 {
6253 #define HCLGE_DESC_NUMBER 3
6254 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6255         int i, j;
6256
6257         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6258                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6259                         if (desc[i].data[j])
6260                                 return false;
6261
6262         return true;
6263 }
6264
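/* pack the 6-byte MAC address into the table entry layout: bytes 0-3 fill
 * mac_addr_hi32 from the low bits upwards, bytes 4-5 fill mac_addr_lo16,
 * and the entry is flagged valid (and as multicast when is_mc is set)
 */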
6265 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6266                                    const u8 *addr, bool is_mc)
6267 {
6268         const unsigned char *mac_addr = addr;
6269         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6270                        (mac_addr[0]) | (mac_addr[1] << 8);
6271         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6272
6273         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6274         if (is_mc) {
6275                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6276                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6277         }
6278
6279         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6280         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6281 }
6282
6283 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6284                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6285 {
6286         struct hclge_dev *hdev = vport->back;
6287         struct hclge_desc desc;
6288         u8 resp_code;
6289         u16 retval;
6290         int ret;
6291
6292         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6293
6294         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6295
6296         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6297         if (ret) {
6298                 dev_err(&hdev->pdev->dev,
6299                         "del mac addr failed for cmd_send, ret =%d.\n",
6300                         ret);
6301                 return ret;
6302         }
6303         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6304         retval = le16_to_cpu(desc.retval);
6305
6306         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6307                                              HCLGE_MAC_VLAN_REMOVE);
6308 }
6309
6310 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6311                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6312                                      struct hclge_desc *desc,
6313                                      bool is_mc)
6314 {
6315         struct hclge_dev *hdev = vport->back;
6316         u8 resp_code;
6317         u16 retval;
6318         int ret;
6319
6320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6321         if (is_mc) {
6322                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6323                 memcpy(desc[0].data,
6324                        req,
6325                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6326                 hclge_cmd_setup_basic_desc(&desc[1],
6327                                            HCLGE_OPC_MAC_VLAN_ADD,
6328                                            true);
6329                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6330                 hclge_cmd_setup_basic_desc(&desc[2],
6331                                            HCLGE_OPC_MAC_VLAN_ADD,
6332                                            true);
6333                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6334         } else {
6335                 memcpy(desc[0].data,
6336                        req,
6337                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6338                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6339         }
6340         if (ret) {
6341                 dev_err(&hdev->pdev->dev,
6342                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6343                         ret);
6344                 return ret;
6345         }
6346         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6347         retval = le16_to_cpu(desc[0].retval);
6348
6349         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6350                                              HCLGE_MAC_VLAN_LKUP);
6351 }
6352
6353 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6354                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6355                                   struct hclge_desc *mc_desc)
6356 {
6357         struct hclge_dev *hdev = vport->back;
6358         int cfg_status;
6359         u8 resp_code;
6360         u16 retval;
6361         int ret;
6362
6363         if (!mc_desc) {
6364                 struct hclge_desc desc;
6365
6366                 hclge_cmd_setup_basic_desc(&desc,
6367                                            HCLGE_OPC_MAC_VLAN_ADD,
6368                                            false);
6369                 memcpy(desc.data, req,
6370                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6371                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6372                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6373                 retval = le16_to_cpu(desc.retval);
6374
6375                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6376                                                            resp_code,
6377                                                            HCLGE_MAC_VLAN_ADD);
6378         } else {
6379                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6380                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6381                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6382                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6383                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6384                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6385                 memcpy(mc_desc[0].data, req,
6386                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6387                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6388                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6389                 retval = le16_to_cpu(mc_desc[0].retval);
6390
6391                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6392                                                            resp_code,
6393                                                            HCLGE_MAC_VLAN_ADD);
6394         }
6395
6396         if (ret) {
6397                 dev_err(&hdev->pdev->dev,
6398                         "add mac addr failed for cmd_send, ret =%d.\n",
6399                         ret);
6400                 return ret;
6401         }
6402
6403         return cfg_status;
6404 }
6405
6406 static int hclge_init_umv_space(struct hclge_dev *hdev)
6407 {
6408         u16 allocated_size = 0;
6409         int ret;
6410
6411         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6412                                   true);
6413         if (ret)
6414                 return ret;
6415
6416         if (allocated_size < hdev->wanted_umv_size)
6417                 dev_warn(&hdev->pdev->dev,
6418                          "Alloc umv space failed, want %d, get %d\n",
6419                          hdev->wanted_umv_size, allocated_size);
6420
6421         mutex_init(&hdev->umv_mutex);
6422         hdev->max_umv_size = allocated_size;
6423         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6424          * preserve some unicast mac vlan table entries shared by pf
6425          * and its vfs.
6426          */
6427         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6428         hdev->share_umv_size = hdev->priv_umv_size +
6429                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
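        /* e.g. with purely illustrative numbers max_umv_size = 256 and
         * num_req_vfs = 6, each function gets priv_umv_size = 256 / 8 = 32
         * private entries and share_umv_size starts at 32 + 256 % 8 = 32
         */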
6430
6431         return 0;
6432 }
6433
6434 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6435 {
6436         int ret;
6437
6438         if (hdev->max_umv_size > 0) {
6439                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6440                                           false);
6441                 if (ret)
6442                         return ret;
6443                 hdev->max_umv_size = 0;
6444         }
6445         mutex_destroy(&hdev->umv_mutex);
6446
6447         return 0;
6448 }
6449
6450 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6451                                u16 *allocated_size, bool is_alloc)
6452 {
6453         struct hclge_umv_spc_alc_cmd *req;
6454         struct hclge_desc desc;
6455         int ret;
6456
6457         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6458         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6459         if (!is_alloc)
6460                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6461
6462         req->space_size = cpu_to_le32(space_size);
6463
6464         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6465         if (ret) {
6466                 dev_err(&hdev->pdev->dev,
6467                         "%s umv space failed for cmd_send, ret =%d\n",
6468                         is_alloc ? "allocate" : "free", ret);
6469                 return ret;
6470         }
6471
6472         if (is_alloc && allocated_size)
6473                 *allocated_size = le32_to_cpu(desc.data[1]);
6474
6475         return 0;
6476 }
6477
6478 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6479 {
6480         struct hclge_vport *vport;
6481         int i;
6482
6483         for (i = 0; i < hdev->num_alloc_vport; i++) {
6484                 vport = &hdev->vport[i];
6485                 vport->used_umv_num = 0;
6486         }
6487
6488         mutex_lock(&hdev->umv_mutex);
6489         hdev->share_umv_size = hdev->priv_umv_size +
6490                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6491         mutex_unlock(&hdev->umv_mutex);
6492 }
6493
6494 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6495 {
6496         struct hclge_dev *hdev = vport->back;
6497         bool is_full;
6498
6499         mutex_lock(&hdev->umv_mutex);
6500         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6501                    hdev->share_umv_size == 0);
6502         mutex_unlock(&hdev->umv_mutex);
6503
6504         return is_full;
6505 }
6506
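/* account a unicast MAC entry against the vport's private quota first and
 * fall back to the shared pool once that quota is used up; freeing reverses
 * the accounting, returning entries to the shared pool while the vport is
 * over its private quota
 */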
6507 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6508 {
6509         struct hclge_dev *hdev = vport->back;
6510
6511         mutex_lock(&hdev->umv_mutex);
6512         if (is_free) {
6513                 if (vport->used_umv_num > hdev->priv_umv_size)
6514                         hdev->share_umv_size++;
6515
6516                 if (vport->used_umv_num > 0)
6517                         vport->used_umv_num--;
6518         } else {
6519                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6520                     hdev->share_umv_size > 0)
6521                         hdev->share_umv_size--;
6522                 vport->used_umv_num++;
6523         }
6524         mutex_unlock(&hdev->umv_mutex);
6525 }
6526
6527 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6528                              const unsigned char *addr)
6529 {
6530         struct hclge_vport *vport = hclge_get_vport(handle);
6531
6532         return hclge_add_uc_addr_common(vport, addr);
6533 }
6534
6535 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6536                              const unsigned char *addr)
6537 {
6538         struct hclge_dev *hdev = vport->back;
6539         struct hclge_mac_vlan_tbl_entry_cmd req;
6540         struct hclge_desc desc;
6541         u16 egress_port = 0;
6542         int ret;
6543
6544         /* mac addr check */
6545         if (is_zero_ether_addr(addr) ||
6546             is_broadcast_ether_addr(addr) ||
6547             is_multicast_ether_addr(addr)) {
6548                 dev_err(&hdev->pdev->dev,
6549                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6550                          addr, is_zero_ether_addr(addr),
6551                          is_broadcast_ether_addr(addr),
6552                          is_multicast_ether_addr(addr));
6553                 return -EINVAL;
6554         }
6555
6556         memset(&req, 0, sizeof(req));
6557
6558         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6559                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6560
6561         req.egress_port = cpu_to_le16(egress_port);
6562
6563         hclge_prepare_mac_addr(&req, addr, false);
6564
6565         /* Look up the mac address in the mac_vlan table, and add
6566          * it if the entry does not exist. Duplicate unicast entries
6567          * are not allowed in the mac vlan table.
6568          */
6569         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6570         if (ret == -ENOENT) {
6571                 if (!hclge_is_umv_space_full(vport)) {
6572                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6573                         if (!ret)
6574                                 hclge_update_umv_space(vport, false);
6575                         return ret;
6576                 }
6577
6578                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6579                         hdev->priv_umv_size);
6580
6581                 return -ENOSPC;
6582         }
6583
6584         /* check if we just hit the duplicate */
6585         if (!ret) {
6586                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6587                          vport->vport_id, addr);
6588                 return 0;
6589         }
6590
6591         dev_err(&hdev->pdev->dev,
6592                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6593                 addr);
6594
6595         return ret;
6596 }
6597
6598 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6599                             const unsigned char *addr)
6600 {
6601         struct hclge_vport *vport = hclge_get_vport(handle);
6602
6603         return hclge_rm_uc_addr_common(vport, addr);
6604 }
6605
6606 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6607                             const unsigned char *addr)
6608 {
6609         struct hclge_dev *hdev = vport->back;
6610         struct hclge_mac_vlan_tbl_entry_cmd req;
6611         int ret;
6612
6613         /* mac addr check */
6614         if (is_zero_ether_addr(addr) ||
6615             is_broadcast_ether_addr(addr) ||
6616             is_multicast_ether_addr(addr)) {
6617                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6618                         addr);
6619                 return -EINVAL;
6620         }
6621
6622         memset(&req, 0, sizeof(req));
6623         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6624         hclge_prepare_mac_addr(&req, addr, false);
6625         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6626         if (!ret)
6627                 hclge_update_umv_space(vport, true);
6628
6629         return ret;
6630 }
6631
6632 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6633                              const unsigned char *addr)
6634 {
6635         struct hclge_vport *vport = hclge_get_vport(handle);
6636
6637         return hclge_add_mc_addr_common(vport, addr);
6638 }
6639
6640 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6641                              const unsigned char *addr)
6642 {
6643         struct hclge_dev *hdev = vport->back;
6644         struct hclge_mac_vlan_tbl_entry_cmd req;
6645         struct hclge_desc desc[3];
6646         int status;
6647
6648         /* mac addr check */
6649         if (!is_multicast_ether_addr(addr)) {
6650                 dev_err(&hdev->pdev->dev,
6651                         "Add mc mac err! invalid mac:%pM.\n",
6652                          addr);
6653                 return -EINVAL;
6654         }
6655         memset(&req, 0, sizeof(req));
6656         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6657         hclge_prepare_mac_addr(&req, addr, true);
6658         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6659         if (status) {
6660                 /* This mac addr does not exist, add a new entry for it */
6661                 memset(desc[0].data, 0, sizeof(desc[0].data));
6662                 memset(desc[1].data, 0, sizeof(desc[1].data));
6663                 memset(desc[2].data, 0, sizeof(desc[2].data));
6664         }
6665         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6666         if (status)
6667                 return status;
6668         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6669
6670         if (status == -ENOSPC)
6671                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6672
6673         return status;
6674 }
6675
6676 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6677                             const unsigned char *addr)
6678 {
6679         struct hclge_vport *vport = hclge_get_vport(handle);
6680
6681         return hclge_rm_mc_addr_common(vport, addr);
6682 }
6683
6684 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6685                             const unsigned char *addr)
6686 {
6687         struct hclge_dev *hdev = vport->back;
6688         struct hclge_mac_vlan_tbl_entry_cmd req;
6689         enum hclge_cmd_status status;
6690         struct hclge_desc desc[3];
6691
6692         /* mac addr check */
6693         if (!is_multicast_ether_addr(addr)) {
6694                 dev_dbg(&hdev->pdev->dev,
6695                         "Remove mc mac err! invalid mac:%pM.\n",
6696                          addr);
6697                 return -EINVAL;
6698         }
6699
6700         memset(&req, 0, sizeof(req));
6701         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6702         hclge_prepare_mac_addr(&req, addr, true);
6703         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6704         if (!status) {
6705                 /* This mac addr exists, remove this handle's VFID for it */
6706                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6707                 if (status)
6708                         return status;
6709
6710                 if (hclge_is_all_function_id_zero(desc))
6711                         /* All vfids are zero, so this entry needs to be deleted */
6712                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6713                 else
6714                         /* Not all vfids are zero, update the vfid */
6715                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6716
6717         } else {
6718                 /* This mac address may be in the mta table, but it cannot be
6719                  * deleted here because an mta entry represents an address
6720                  * range rather than a specific address. The delete action for
6721                  * all entries will take effect in update_mta_status, called by
6722                  * hns3_nic_set_rx_mode.
6723                  */
6724                 status = 0;
6725         }
6726
6727         return status;
6728 }
6729
6730 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6731                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6732 {
6733         struct hclge_vport_mac_addr_cfg *mac_cfg;
6734         struct list_head *list;
6735
6736         if (!vport->vport_id)
6737                 return;
6738
6739         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6740         if (!mac_cfg)
6741                 return;
6742
6743         mac_cfg->hd_tbl_status = true;
6744         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6745
6746         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6747                &vport->uc_mac_list : &vport->mc_mac_list;
6748
6749         list_add_tail(&mac_cfg->node, list);
6750 }
6751
6752 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6753                               bool is_write_tbl,
6754                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6755 {
6756         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6757         struct list_head *list;
6758         bool uc_flag, mc_flag;
6759
6760         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6761                &vport->uc_mac_list : &vport->mc_mac_list;
6762
6763         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6764         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6765
6766         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6767                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6768                         if (uc_flag && mac_cfg->hd_tbl_status)
6769                                 hclge_rm_uc_addr_common(vport, mac_addr);
6770
6771                         if (mc_flag && mac_cfg->hd_tbl_status)
6772                                 hclge_rm_mc_addr_common(vport, mac_addr);
6773
6774                         list_del(&mac_cfg->node);
6775                         kfree(mac_cfg);
6776                         break;
6777                 }
6778         }
6779 }
6780
6781 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6782                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6783 {
6784         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6785         struct list_head *list;
6786
6787         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6788                &vport->uc_mac_list : &vport->mc_mac_list;
6789
6790         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6791                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6792                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6793
6794                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6795                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6796
6797                 mac_cfg->hd_tbl_status = false;
6798                 if (is_del_list) {
6799                         list_del(&mac_cfg->node);
6800                         kfree(mac_cfg);
6801                 }
6802         }
6803 }
6804
6805 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6806 {
6807         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6808         struct hclge_vport *vport;
6809         int i;
6810
6811         mutex_lock(&hdev->vport_cfg_mutex);
6812         for (i = 0; i < hdev->num_alloc_vport; i++) {
6813                 vport = &hdev->vport[i];
6814                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6815                         list_del(&mac->node);
6816                         kfree(mac);
6817                 }
6818
6819                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6820                         list_del(&mac->node);
6821                         kfree(mac);
6822                 }
6823         }
6824         mutex_unlock(&hdev->vport_cfg_mutex);
6825 }
6826
6827 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6828                                               u16 cmdq_resp, u8 resp_code)
6829 {
6830 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6831 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6832 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6833 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6834
6835         int return_status;
6836
6837         if (cmdq_resp) {
6838                 dev_err(&hdev->pdev->dev,
6839                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6840                         cmdq_resp);
6841                 return -EIO;
6842         }
6843
6844         switch (resp_code) {
6845         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6846         case HCLGE_ETHERTYPE_ALREADY_ADD:
6847                 return_status = 0;
6848                 break;
6849         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6850                 dev_err(&hdev->pdev->dev,
6851                         "add mac ethertype failed for manager table overflow.\n");
6852                 return_status = -EIO;
6853                 break;
6854         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6855                 dev_err(&hdev->pdev->dev,
6856                         "add mac ethertype failed for key conflict.\n");
6857                 return_status = -EIO;
6858                 break;
6859         default:
6860                 dev_err(&hdev->pdev->dev,
6861                         "add mac ethertype failed for undefined, code=%d.\n",
6862                         resp_code);
6863                 return_status = -EIO;
6864         }
6865
6866         return return_status;
6867 }
6868
6869 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6870                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6871 {
6872         struct hclge_desc desc;
6873         u8 resp_code;
6874         u16 retval;
6875         int ret;
6876
6877         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6878         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6879
6880         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6881         if (ret) {
6882                 dev_err(&hdev->pdev->dev,
6883                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6884                         ret);
6885                 return ret;
6886         }
6887
6888         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6889         retval = le16_to_cpu(desc.retval);
6890
6891         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6892 }
6893
6894 static int init_mgr_tbl(struct hclge_dev *hdev)
6895 {
6896         int ret;
6897         int i;
6898
6899         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6900                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6901                 if (ret) {
6902                         dev_err(&hdev->pdev->dev,
6903                                 "add mac ethertype failed, ret =%d.\n",
6904                                 ret);
6905                         return ret;
6906                 }
6907         }
6908
6909         return 0;
6910 }
6911
6912 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6913 {
6914         struct hclge_vport *vport = hclge_get_vport(handle);
6915         struct hclge_dev *hdev = vport->back;
6916
6917         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6918 }
6919
6920 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6921                               bool is_first)
6922 {
6923         const unsigned char *new_addr = (const unsigned char *)p;
6924         struct hclge_vport *vport = hclge_get_vport(handle);
6925         struct hclge_dev *hdev = vport->back;
6926         int ret;
6927
6928         /* mac addr check */
6929         if (is_zero_ether_addr(new_addr) ||
6930             is_broadcast_ether_addr(new_addr) ||
6931             is_multicast_ether_addr(new_addr)) {
6932                 dev_err(&hdev->pdev->dev,
6933                         "Change uc mac err! invalid mac:%pM.\n",
6934                          new_addr);
6935                 return -EINVAL;
6936         }
6937
6938         if ((!is_first || is_kdump_kernel()) &&
6939             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6940                 dev_warn(&hdev->pdev->dev,
6941                          "remove old uc mac address fail.\n");
6942
6943         ret = hclge_add_uc_addr(handle, new_addr);
6944         if (ret) {
6945                 dev_err(&hdev->pdev->dev,
6946                         "add uc mac address fail, ret =%d.\n",
6947                         ret);
6948
6949                 if (!is_first &&
6950                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6951                         dev_err(&hdev->pdev->dev,
6952                                 "restore uc mac address fail.\n");
6953
6954                 return -EIO;
6955         }
6956
6957         ret = hclge_pause_addr_cfg(hdev, new_addr);
6958         if (ret) {
6959                 dev_err(&hdev->pdev->dev,
6960                         "configure mac pause address fail, ret =%d.\n",
6961                         ret);
6962                 return -EIO;
6963         }
6964
6965         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6966
6967         return 0;
6968 }
6969
6970 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6971                           int cmd)
6972 {
6973         struct hclge_vport *vport = hclge_get_vport(handle);
6974         struct hclge_dev *hdev = vport->back;
6975
6976         if (!hdev->hw.mac.phydev)
6977                 return -EOPNOTSUPP;
6978
6979         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6980 }
6981
6982 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6983                                       u8 fe_type, bool filter_en, u8 vf_id)
6984 {
6985         struct hclge_vlan_filter_ctrl_cmd *req;
6986         struct hclge_desc desc;
6987         int ret;
6988
6989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6990
6991         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6992         req->vlan_type = vlan_type;
6993         req->vlan_fe = filter_en ? fe_type : 0;
6994         req->vf_id = vf_id;
6995
6996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997         if (ret)
6998                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6999                         ret);
7000
7001         return ret;
7002 }
7003
7004 #define HCLGE_FILTER_TYPE_VF            0
7005 #define HCLGE_FILTER_TYPE_PORT          1
7006 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7007 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7008 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7009 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7010 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7011 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7012                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7013 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7014                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7015
7016 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7017 {
7018         struct hclge_vport *vport = hclge_get_vport(handle);
7019         struct hclge_dev *hdev = vport->back;
7020
7021         if (hdev->pdev->revision >= 0x21) {
7022                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7023                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7024                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7025                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7026         } else {
7027                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7028                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7029                                            0);
7030         }
7031         if (enable)
7032                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7033         else
7034                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7035 }
7036
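/* Add or remove (is_kill) a vlan id in the per-VF vlan filter. The VF
 * bitmap spans two descriptors of HCLGE_MAX_VF_BYTES each; vfid selects
 * byte (vfid / 8) and bit (vfid % 8), e.g. vfid 100 -> byte 12, bit 4
 * of the first descriptor.
 */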
7037 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7038                                     bool is_kill, u16 vlan, u8 qos,
7039                                     __be16 proto)
7040 {
7041 #define HCLGE_MAX_VF_BYTES  16
7042         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7043         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7044         struct hclge_desc desc[2];
7045         u8 vf_byte_val;
7046         u8 vf_byte_off;
7047         int ret;
7048
7049         /* if the vf vlan table is full, firmware will disable the vf vlan
7050          * filter, so it is useless and unnecessary to add a new vlan id
7051          */
7052         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7053                 return 0;
7054
7055         hclge_cmd_setup_basic_desc(&desc[0],
7056                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7057         hclge_cmd_setup_basic_desc(&desc[1],
7058                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7059
7060         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7061
7062         vf_byte_off = vfid / 8;
7063         vf_byte_val = 1 << (vfid % 8);
7064
7065         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7066         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7067
7068         req0->vlan_id  = cpu_to_le16(vlan);
7069         req0->vlan_cfg = is_kill;
7070
7071         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7072                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7073         else
7074                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7075
7076         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7077         if (ret) {
7078                 dev_err(&hdev->pdev->dev,
7079                         "Send vf vlan command fail, ret =%d.\n",
7080                         ret);
7081                 return ret;
7082         }
7083
7084         if (!is_kill) {
7085 #define HCLGE_VF_VLAN_NO_ENTRY  2
7086                 if (!req0->resp_code || req0->resp_code == 1)
7087                         return 0;
7088
7089                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7090                         set_bit(vfid, hdev->vf_vlan_full);
7091                         dev_warn(&hdev->pdev->dev,
7092                                  "vf vlan table is full, vf vlan filter is disabled\n");
7093                         return 0;
7094                 }
7095
7096                 dev_err(&hdev->pdev->dev,
7097                         "Add vf vlan filter fail, resp_code =%d.\n",
7098                         req0->resp_code);
7099         } else {
7100 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7101                 if (!req0->resp_code)
7102                         return 0;
7103
7104                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7105                         dev_warn(&hdev->pdev->dev,
7106                                  "vlan %d filter is not in vf vlan table\n",
7107                                  vlan);
7108                         return 0;
7109                 }
7110
7111                 dev_err(&hdev->pdev->dev,
7112                         "Kill vf vlan filter fail, resp_code =%d.\n",
7113                         req0->resp_code);
7114         }
7115
7116         return -EIO;
7117 }
7118
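/* Program the port (PF) vlan filter. Vlan ids are addressed in groups
 * of 160: vlan_offset selects the group, then byte (vlan_id % 160) / 8
 * and bit (vlan_id % 8) select the id within it, e.g. vlan 4000 ->
 * offset 25, byte 0, bit 0.
 */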
7119 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7120                                       u16 vlan_id, bool is_kill)
7121 {
7122         struct hclge_vlan_filter_pf_cfg_cmd *req;
7123         struct hclge_desc desc;
7124         u8 vlan_offset_byte_val;
7125         u8 vlan_offset_byte;
7126         u8 vlan_offset_160;
7127         int ret;
7128
7129         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7130
7131         vlan_offset_160 = vlan_id / 160;
7132         vlan_offset_byte = (vlan_id % 160) / 8;
7133         vlan_offset_byte_val = 1 << (vlan_id % 8);
7134
7135         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7136         req->vlan_offset = vlan_offset_160;
7137         req->vlan_cfg = is_kill;
7138         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7139
7140         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7141         if (ret)
7142                 dev_err(&hdev->pdev->dev,
7143                         "port vlan command, send fail, ret =%d.\n", ret);
7144         return ret;
7145 }
7146
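/* Update the per-vport vlan entry first, then touch the shared port
 * vlan filter only when this vport is the first to add the vlan or the
 * last to remove it (tracked in hdev->vlan_table).
 */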
7147 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7148                                     u16 vport_id, u16 vlan_id, u8 qos,
7149                                     bool is_kill)
7150 {
7151         u16 vport_idx, vport_num = 0;
7152         int ret;
7153
7154         if (is_kill && !vlan_id)
7155                 return 0;
7156
7157         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7158                                        0, proto);
7159         if (ret) {
7160                 dev_err(&hdev->pdev->dev,
7161                         "Set vport %d vlan filter config fail, ret =%d.\n",
7162                         vport_id, ret);
7163                 return ret;
7164         }
7165
7166         /* vlan 0 may be added twice when 8021q module is enabled */
7167         if (!is_kill && !vlan_id &&
7168             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7169                 return 0;
7170
7171         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7172                 dev_err(&hdev->pdev->dev,
7173                         "Add port vlan failed, vport %d is already in vlan %d\n",
7174                         vport_id, vlan_id);
7175                 return -EINVAL;
7176         }
7177
7178         if (is_kill &&
7179             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7180                 dev_err(&hdev->pdev->dev,
7181                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7182                         vport_id, vlan_id);
7183                 return -EINVAL;
7184         }
7185
7186         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7187                 vport_num++;
7188
7189         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7190                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7191                                                  is_kill);
7192
7193         return ret;
7194 }
7195
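/* Push this vport's TX vlan tag configuration (default tags, accept and
 * insert bits) to hardware; the vport is addressed by one bit in the
 * command's vf_bitmap.
 */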
7196 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7197 {
7198         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7199         struct hclge_vport_vtag_tx_cfg_cmd *req;
7200         struct hclge_dev *hdev = vport->back;
7201         struct hclge_desc desc;
7202         int status;
7203
7204         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7205
7206         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7207         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7208         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7209         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7210                       vcfg->accept_tag1 ? 1 : 0);
7211         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7212                       vcfg->accept_untag1 ? 1 : 0);
7213         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7214                       vcfg->accept_tag2 ? 1 : 0);
7215         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7216                       vcfg->accept_untag2 ? 1 : 0);
7217         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7218                       vcfg->insert_tag1_en ? 1 : 0);
7219         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7220                       vcfg->insert_tag2_en ? 1 : 0);
7221         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7222
7223         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7224         req->vf_bitmap[req->vf_offset] =
7225                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7226
7227         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7228         if (status)
7229                 dev_err(&hdev->pdev->dev,
7230                         "Send port txvlan cfg command fail, ret =%d\n",
7231                         status);
7232
7233         return status;
7234 }
7235
7236 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7237 {
7238         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7239         struct hclge_vport_vtag_rx_cfg_cmd *req;
7240         struct hclge_dev *hdev = vport->back;
7241         struct hclge_desc desc;
7242         int status;
7243
7244         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7245
7246         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7247         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7248                       vcfg->strip_tag1_en ? 1 : 0);
7249         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7250                       vcfg->strip_tag2_en ? 1 : 0);
7251         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7252                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7253         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7254                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7255
7256         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7257         req->vf_bitmap[req->vf_offset] =
7258                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7259
7260         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7261         if (status)
7262                 dev_err(&hdev->pdev->dev,
7263                         "Send port rxvlan cfg command fail, ret =%d\n",
7264                         status);
7265
7266         return status;
7267 }
7268
7269 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7270                                   u16 port_base_vlan_state,
7271                                   u16 vlan_tag)
7272 {
7273         int ret;
7274
7275         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276                 vport->txvlan_cfg.accept_tag1 = true;
7277                 vport->txvlan_cfg.insert_tag1_en = false;
7278                 vport->txvlan_cfg.default_tag1 = 0;
7279         } else {
7280                 vport->txvlan_cfg.accept_tag1 = false;
7281                 vport->txvlan_cfg.insert_tag1_en = true;
7282                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7283         }
7284
7285         vport->txvlan_cfg.accept_untag1 = true;
7286
7287         /* accept_tag2 and accept_untag2 are not supported on
7288          * pdev revision(0x20); newer revisions support them, but
7289          * these two fields cannot be configured by the user.
7290          */
7291         vport->txvlan_cfg.accept_tag2 = true;
7292         vport->txvlan_cfg.accept_untag2 = true;
7293         vport->txvlan_cfg.insert_tag2_en = false;
7294         vport->txvlan_cfg.default_tag2 = 0;
7295
7296         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7297                 vport->rxvlan_cfg.strip_tag1_en = false;
7298                 vport->rxvlan_cfg.strip_tag2_en =
7299                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7300         } else {
7301                 vport->rxvlan_cfg.strip_tag1_en =
7302                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7303                 vport->rxvlan_cfg.strip_tag2_en = true;
7304         }
7305         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7306         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7307
7308         ret = hclge_set_vlan_tx_offload_cfg(vport);
7309         if (ret)
7310                 return ret;
7311
7312         return hclge_set_vlan_rx_offload_cfg(vport);
7313 }
7314
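/* Program the vlan TPIDs from hdev->vlan_type_cfg: outer/inner first
 * and second vlan types for RX matching, and the outer/inner types used
 * for TX vlan insertion.
 */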
7315 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7316 {
7317         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7318         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7319         struct hclge_desc desc;
7320         int status;
7321
7322         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7323         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7324         rx_req->ot_fst_vlan_type =
7325                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7326         rx_req->ot_sec_vlan_type =
7327                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7328         rx_req->in_fst_vlan_type =
7329                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7330         rx_req->in_sec_vlan_type =
7331                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7332
7333         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7334         if (status) {
7335                 dev_err(&hdev->pdev->dev,
7336                         "Send rxvlan protocol type command fail, ret =%d\n",
7337                         status);
7338                 return status;
7339         }
7340
7341         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7342
7343         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7344         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7345         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7346
7347         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7348         if (status)
7349                 dev_err(&hdev->pdev->dev,
7350                         "Send txvlan protocol type command fail, ret =%d\n",
7351                         status);
7352
7353         return status;
7354 }
7355
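/* Initial vlan setup: enable the ingress/egress vlan filters according
 * to the hardware revision, set all TPIDs to 0x8100, apply each vport's
 * vlan offload configuration and finally add vlan 0 to the filter.
 */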
7356 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7357 {
7358 #define HCLGE_DEF_VLAN_TYPE             0x8100
7359
7360         struct hnae3_handle *handle = &hdev->vport[0].nic;
7361         struct hclge_vport *vport;
7362         int ret;
7363         int i;
7364
7365         if (hdev->pdev->revision >= 0x21) {
7366                 /* for revision 0x21, vf vlan filter is per function */
7367                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7368                         vport = &hdev->vport[i];
7369                         ret = hclge_set_vlan_filter_ctrl(hdev,
7370                                                          HCLGE_FILTER_TYPE_VF,
7371                                                          HCLGE_FILTER_FE_EGRESS,
7372                                                          true,
7373                                                          vport->vport_id);
7374                         if (ret)
7375                                 return ret;
7376                 }
7377
7378                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7379                                                  HCLGE_FILTER_FE_INGRESS, true,
7380                                                  0);
7381                 if (ret)
7382                         return ret;
7383         } else {
7384                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7385                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7386                                                  true, 0);
7387                 if (ret)
7388                         return ret;
7389         }
7390
7391         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7392
7393         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7394         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7395         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7396         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7397         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7399
7400         ret = hclge_set_vlan_protocol_type(hdev);
7401         if (ret)
7402                 return ret;
7403
7404         for (i = 0; i < hdev->num_alloc_vport; i++) {
7405                 u16 vlan_tag;
7406
7407                 vport = &hdev->vport[i];
7408                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7409
7410                 ret = hclge_vlan_offload_cfg(vport,
7411                                              vport->port_base_vlan_cfg.state,
7412                                              vlan_tag);
7413                 if (ret)
7414                         return ret;
7415         }
7416
7417         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7418 }
7419
7420 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7421                                        bool writen_to_tbl)
7422 {
7423         struct hclge_vport_vlan_cfg *vlan;
7424
7425         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7426         if (!vlan)
7427                 return;
7428
7429         vlan->hd_tbl_status = writen_to_tbl;
7430         vlan->vlan_id = vlan_id;
7431
7432         list_add_tail(&vlan->node, &vport->vlan_list);
7433 }
7434
7435 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7436 {
7437         struct hclge_vport_vlan_cfg *vlan, *tmp;
7438         struct hclge_dev *hdev = vport->back;
7439         int ret;
7440
7441         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7442                 if (!vlan->hd_tbl_status) {
7443                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7444                                                        vport->vport_id,
7445                                                        vlan->vlan_id, 0, false);
7446                         if (ret) {
7447                                 dev_err(&hdev->pdev->dev,
7448                                         "restore vport vlan list failed, ret=%d\n",
7449                                         ret);
7450                                 return ret;
7451                         }
7452                 }
7453                 vlan->hd_tbl_status = true;
7454         }
7455
7456         return 0;
7457 }
7458
7459 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7460                                       bool is_write_tbl)
7461 {
7462         struct hclge_vport_vlan_cfg *vlan, *tmp;
7463         struct hclge_dev *hdev = vport->back;
7464
7465         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7466                 if (vlan->vlan_id == vlan_id) {
7467                         if (is_write_tbl && vlan->hd_tbl_status)
7468                                 hclge_set_vlan_filter_hw(hdev,
7469                                                          htons(ETH_P_8021Q),
7470                                                          vport->vport_id,
7471                                                          vlan_id, 0,
7472                                                          true);
7473
7474                         list_del(&vlan->node);
7475                         kfree(vlan);
7476                         break;
7477                 }
7478         }
7479 }
7480
7481 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7482 {
7483         struct hclge_vport_vlan_cfg *vlan, *tmp;
7484         struct hclge_dev *hdev = vport->back;
7485
7486         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7487                 if (vlan->hd_tbl_status)
7488                         hclge_set_vlan_filter_hw(hdev,
7489                                                  htons(ETH_P_8021Q),
7490                                                  vport->vport_id,
7491                                                  vlan->vlan_id, 0,
7492                                                  true);
7493
7494                 vlan->hd_tbl_status = false;
7495                 if (is_del_list) {
7496                         list_del(&vlan->node);
7497                         kfree(vlan);
7498                 }
7499         }
7500 }
7501
7502 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7503 {
7504         struct hclge_vport_vlan_cfg *vlan, *tmp;
7505         struct hclge_vport *vport;
7506         int i;
7507
7508         mutex_lock(&hdev->vport_cfg_mutex);
7509         for (i = 0; i < hdev->num_alloc_vport; i++) {
7510                 vport = &hdev->vport[i];
7511                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7512                         list_del(&vlan->node);
7513                         kfree(vlan);
7514                 }
7515         }
7516         mutex_unlock(&hdev->vport_cfg_mutex);
7517 }
7518
7519 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7520 {
7521         struct hclge_vport *vport = hclge_get_vport(handle);
7522         struct hclge_vport_vlan_cfg *vlan, *tmp;
7523         struct hclge_dev *hdev = vport->back;
7524         u16 vlan_proto, qos;
7525         u16 state, vlan_id;
7526         int i;
7527
7528         mutex_lock(&hdev->vport_cfg_mutex);
7529         for (i = 0; i < hdev->num_alloc_vport; i++) {
7530                 vport = &hdev->vport[i];
7531                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7532                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7533                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7534                 state = vport->port_base_vlan_cfg.state;
7535
7536                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7537                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7538                                                  vport->vport_id, vlan_id, qos,
7539                                                  false);
7540                         continue;
7541                 }
7542
7543                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7544                         if (vlan->hd_tbl_status)
7545                                 hclge_set_vlan_filter_hw(hdev,
7546                                                          htons(ETH_P_8021Q),
7547                                                          vport->vport_id,
7548                                                          vlan->vlan_id, 0,
7549                                                          false);
7550                 }
7551         }
7552
7553         mutex_unlock(&hdev->vport_cfg_mutex);
7554 }
7555
7556 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7557 {
7558         struct hclge_vport *vport = hclge_get_vport(handle);
7559
7560         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7561                 vport->rxvlan_cfg.strip_tag1_en = false;
7562                 vport->rxvlan_cfg.strip_tag2_en = enable;
7563         } else {
7564                 vport->rxvlan_cfg.strip_tag1_en = enable;
7565                 vport->rxvlan_cfg.strip_tag2_en = true;
7566         }
7567         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7568         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7569         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7570
7571         return hclge_set_vlan_rx_offload_cfg(vport);
7572 }
7573
7574 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7575                                             u16 port_base_vlan_state,
7576                                             struct hclge_vlan_info *new_info,
7577                                             struct hclge_vlan_info *old_info)
7578 {
7579         struct hclge_dev *hdev = vport->back;
7580         int ret;
7581
7582         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7583                 hclge_rm_vport_all_vlan_table(vport, false);
7584                 return hclge_set_vlan_filter_hw(hdev,
7585                                                  htons(new_info->vlan_proto),
7586                                                  vport->vport_id,
7587                                                  new_info->vlan_tag,
7588                                                  new_info->qos, false);
7589         }
7590
7591         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7592                                        vport->vport_id, old_info->vlan_tag,
7593                                        old_info->qos, true);
7594         if (ret)
7595                 return ret;
7596
7597         return hclge_add_vport_all_vlan_table(vport);
7598 }
7599
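/* Apply a port based vlan change. For HNAE3_PORT_BASE_VLAN_MODIFY only
 * the vlan tag is swapped in hardware; for enable/disable the vport
 * vlan list and the cached state are updated as well.
 */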
7600 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7601                                     struct hclge_vlan_info *vlan_info)
7602 {
7603         struct hnae3_handle *nic = &vport->nic;
7604         struct hclge_vlan_info *old_vlan_info;
7605         struct hclge_dev *hdev = vport->back;
7606         int ret;
7607
7608         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7609
7610         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7611         if (ret)
7612                 return ret;
7613
7614         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7615                 /* add new VLAN tag */
7616                 ret = hclge_set_vlan_filter_hw(hdev,
7617                                                htons(vlan_info->vlan_proto),
7618                                                vport->vport_id,
7619                                                vlan_info->vlan_tag,
7620                                                vlan_info->qos, false);
7621                 if (ret)
7622                         return ret;
7623
7624                 /* remove old VLAN tag */
7625                 ret = hclge_set_vlan_filter_hw(hdev,
7626                                                htons(old_vlan_info->vlan_proto),
7627                                                vport->vport_id,
7628                                                old_vlan_info->vlan_tag,
7629                                                old_vlan_info->qos, true);
7630                 if (ret)
7631                         return ret;
7632
7633                 goto update;
7634         }
7635
7636         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7637                                                old_vlan_info);
7638         if (ret)
7639                 return ret;
7640
7641         /* only update state when disabling or enabling port based VLAN */
7642         vport->port_base_vlan_cfg.state = state;
7643         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7644                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7645         else
7646                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7647
7648 update:
7649         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7650         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7651         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7652
7653         return 0;
7654 }
7655
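/* Decide the next port based vlan state: when currently disabled, a
 * non-zero vlan enables it; when enabled, vlan 0 disables it, the same
 * tag is a no-op and a different tag is a modify.
 */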
7656 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7657                                           enum hnae3_port_base_vlan_state state,
7658                                           u16 vlan)
7659 {
7660         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7661                 if (!vlan)
7662                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7663                 else
7664                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7665         } else {
7666                 if (!vlan)
7667                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7668                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7669                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7670                 else
7671                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7672         }
7673 }
7674
7675 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7676                                     u16 vlan, u8 qos, __be16 proto)
7677 {
7678         struct hclge_vport *vport = hclge_get_vport(handle);
7679         struct hclge_dev *hdev = vport->back;
7680         struct hclge_vlan_info vlan_info;
7681         u16 state;
7682         int ret;
7683
7684         if (hdev->pdev->revision == 0x20)
7685                 return -EOPNOTSUPP;
7686
7687         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7688         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7689                 return -EINVAL;
7690         if (proto != htons(ETH_P_8021Q))
7691                 return -EPROTONOSUPPORT;
7692
7693         vport = &hdev->vport[vfid];
7694         state = hclge_get_port_base_vlan_state(vport,
7695                                                vport->port_base_vlan_cfg.state,
7696                                                vlan);
7697         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7698                 return 0;
7699
7700         vlan_info.vlan_tag = vlan;
7701         vlan_info.qos = qos;
7702         vlan_info.vlan_proto = ntohs(proto);
7703
7704         /* update port based VLAN for PF */
7705         if (!vfid) {
7706                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7707                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7708                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7709
7710                 return ret;
7711         }
7712
7713         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7714                 return hclge_update_port_base_vlan_cfg(vport, state,
7715                                                        &vlan_info);
7716         } else {
7717                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7718                                                         (u8)vfid, state,
7719                                                         vlan, qos,
7720                                                         ntohs(proto));
7721                 return ret;
7722         }
7723 }
7724
7725 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7726                           u16 vlan_id, bool is_kill)
7727 {
7728         struct hclge_vport *vport = hclge_get_vport(handle);
7729         struct hclge_dev *hdev = vport->back;
7730         bool writen_to_tbl = false;
7731         int ret = 0;
7732
7733         /* When port based VLAN is enabled, it is used as the VLAN filter
7734          * entry. In this case the VLAN filter table is not updated when the
7735          * user adds a new VLAN or removes an existing one; only the vport
7736          * VLAN list is updated. The VLAN ids in that list are not written
7737          * to the VLAN filter table until port based VLAN is disabled.
7738          */
7739         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7740                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7741                                                vlan_id, 0, is_kill);
7742                 writen_to_tbl = true;
7743         }
7744
7745         if (ret)
7746                 return ret;
7747
7748         if (is_kill)
7749                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7750         else
7751                 hclge_add_vport_vlan_table(vport, vlan_id,
7752                                            writen_to_tbl);
7753
7754         return 0;
7755 }
7756
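/* Program the MAC maximum frame size (MPS); the minimum frame size is
 * fixed at HCLGE_MAC_MIN_FRAME.
 */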
7757 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7758 {
7759         struct hclge_config_max_frm_size_cmd *req;
7760         struct hclge_desc desc;
7761
7762         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7763
7764         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7765         req->max_frm_size = cpu_to_le16(new_mps);
7766         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7767
7768         return hclge_cmd_send(&hdev->hw, &desc, 1);
7769 }
7770
7771 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7772 {
7773         struct hclge_vport *vport = hclge_get_vport(handle);
7774
7775         return hclge_set_vport_mtu(vport, new_mtu);
7776 }
7777
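/* Convert the requested MTU into a maximum frame size, e.g. MTU 1500 ->
 * 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + 2 * VLAN_HLEN(4) = 1526 bytes,
 * and validate it against the PF/VF limits before programming it.
 */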
7778 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7779 {
7780         struct hclge_dev *hdev = vport->back;
7781         int i, max_frm_size, ret;
7782
7783         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7784         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7785             max_frm_size > HCLGE_MAC_MAX_FRAME)
7786                 return -EINVAL;
7787
7788         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7789         mutex_lock(&hdev->vport_lock);
7790         /* VF's mps must fit within hdev->mps */
7791         if (vport->vport_id && max_frm_size > hdev->mps) {
7792                 mutex_unlock(&hdev->vport_lock);
7793                 return -EINVAL;
7794         } else if (vport->vport_id) {
7795                 vport->mps = max_frm_size;
7796                 mutex_unlock(&hdev->vport_lock);
7797                 return 0;
7798         }
7799
7800         /* PF's mps must be no less than any VF's mps */
7801         for (i = 1; i < hdev->num_alloc_vport; i++)
7802                 if (max_frm_size < hdev->vport[i].mps) {
7803                         mutex_unlock(&hdev->vport_lock);
7804                         return -EINVAL;
7805                 }
7806
7807         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7808
7809         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7810         if (ret) {
7811                 dev_err(&hdev->pdev->dev,
7812                         "Change mtu fail, ret =%d\n", ret);
7813                 goto out;
7814         }
7815
7816         hdev->mps = max_frm_size;
7817         vport->mps = max_frm_size;
7818
7819         ret = hclge_buffer_alloc(hdev);
7820         if (ret)
7821                 dev_err(&hdev->pdev->dev,
7822                         "Allocate buffer fail, ret =%d\n", ret);
7823
7824 out:
7825         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7826         mutex_unlock(&hdev->vport_lock);
7827         return ret;
7828 }
7829
7830 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7831                                     bool enable)
7832 {
7833         struct hclge_reset_tqp_queue_cmd *req;
7834         struct hclge_desc desc;
7835         int ret;
7836
7837         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7838
7839         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7840         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7841         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7842
7843         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7844         if (ret) {
7845                 dev_err(&hdev->pdev->dev,
7846                         "Send tqp reset cmd error, status =%d\n", ret);
7847                 return ret;
7848         }
7849
7850         return 0;
7851 }
7852
7853 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7854 {
7855         struct hclge_reset_tqp_queue_cmd *req;
7856         struct hclge_desc desc;
7857         int ret;
7858
7859         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7860
7861         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7862         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7863
7864         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7865         if (ret) {
7866                 dev_err(&hdev->pdev->dev,
7867                         "Get reset status error, status =%d\n", ret);
7868                 return ret;
7869         }
7870
7871         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7872 }
7873
7874 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7875 {
7876         struct hnae3_queue *queue;
7877         struct hclge_tqp *tqp;
7878
7879         queue = handle->kinfo.tqp[queue_id];
7880         tqp = container_of(queue, struct hclge_tqp, q);
7881
7882         return tqp->index;
7883 }
7884
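/* Per-queue reset sequence: disable the queue, assert the TQP reset,
 * poll the ready status up to HCLGE_TQP_RESET_TRY_TIMES with 20 ms
 * sleeps, then deassert the reset.
 */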
7885 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7886 {
7887         struct hclge_vport *vport = hclge_get_vport(handle);
7888         struct hclge_dev *hdev = vport->back;
7889         int reset_try_times = 0;
7890         int reset_status;
7891         u16 queue_gid;
7892         int ret;
7893
7894         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7895
7896         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7897         if (ret) {
7898                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7899                 return ret;
7900         }
7901
7902         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7903         if (ret) {
7904                 dev_err(&hdev->pdev->dev,
7905                         "Send reset tqp cmd fail, ret = %d\n", ret);
7906                 return ret;
7907         }
7908
7909         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7910                 /* Wait for tqp hw reset */
7911                 msleep(20);
7912                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7913                 if (reset_status)
7914                         break;
7915         }
7916
7917         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7918                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7919                 return -ETIME;
7920         }
7921
7922         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7923         if (ret)
7924                 dev_err(&hdev->pdev->dev,
7925                         "Deassert the soft reset fail, ret = %d\n", ret);
7926
7927         return ret;
7928 }
7929
7930 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7931 {
7932         struct hclge_dev *hdev = vport->back;
7933         int reset_try_times = 0;
7934         int reset_status;
7935         u16 queue_gid;
7936         int ret;
7937
7938         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7939
7940         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7941         if (ret) {
7942                 dev_warn(&hdev->pdev->dev,
7943                          "Send reset tqp cmd fail, ret = %d\n", ret);
7944                 return;
7945         }
7946
7947         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7948                 /* Wait for tqp hw reset */
7949                 msleep(20);
7950                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7951                 if (reset_status)
7952                         break;
7953         }
7954
7955         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7956                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7957                 return;
7958         }
7959
7960         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7961         if (ret)
7962                 dev_warn(&hdev->pdev->dev,
7963                          "Deassert the soft reset fail, ret = %d\n", ret);
7964 }
7965
7966 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7967 {
7968         struct hclge_vport *vport = hclge_get_vport(handle);
7969         struct hclge_dev *hdev = vport->back;
7970
7971         return hdev->fw_version;
7972 }
7973
7974 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7975 {
7976         struct phy_device *phydev = hdev->hw.mac.phydev;
7977
7978         if (!phydev)
7979                 return;
7980
7981         phy_set_asym_pause(phydev, rx_en, tx_en);
7982 }
7983
7984 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7985 {
7986         int ret;
7987
7988         if (rx_en && tx_en)
7989                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7990         else if (rx_en && !tx_en)
7991                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7992         else if (!rx_en && tx_en)
7993                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7994         else
7995                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7996
7997         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7998                 return 0;
7999
8000         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8001         if (ret) {
8002                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8003                         ret);
8004                 return ret;
8005         }
8006
8007         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8008
8009         return 0;
8010 }
8011
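/* Resolve the pause configuration from the local and link partner
 * autoneg advertisements with mii_resolve_flowctrl_fdx() and apply it;
 * pause is forced off for half duplex links.
 */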
8012 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8013 {
8014         struct phy_device *phydev = hdev->hw.mac.phydev;
8015         u16 remote_advertising = 0;
8016         u16 local_advertising;
8017         u32 rx_pause, tx_pause;
8018         u8 flowctl;
8019
8020         if (!phydev->link || !phydev->autoneg)
8021                 return 0;
8022
8023         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8024
8025         if (phydev->pause)
8026                 remote_advertising = LPA_PAUSE_CAP;
8027
8028         if (phydev->asym_pause)
8029                 remote_advertising |= LPA_PAUSE_ASYM;
8030
8031         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8032                                            remote_advertising);
8033         tx_pause = flowctl & FLOW_CTRL_TX;
8034         rx_pause = flowctl & FLOW_CTRL_RX;
8035
8036         if (phydev->duplex == HCLGE_MAC_HALF) {
8037                 tx_pause = 0;
8038                 rx_pause = 0;
8039         }
8040
8041         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8042 }
8043
8044 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8045                                  u32 *rx_en, u32 *tx_en)
8046 {
8047         struct hclge_vport *vport = hclge_get_vport(handle);
8048         struct hclge_dev *hdev = vport->back;
8049
8050         *auto_neg = hclge_get_autoneg(handle);
8051
8052         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8053                 *rx_en = 0;
8054                 *tx_en = 0;
8055                 return;
8056         }
8057
8058         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8059                 *rx_en = 1;
8060                 *tx_en = 0;
8061         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8062                 *tx_en = 1;
8063                 *rx_en = 0;
8064         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8065                 *rx_en = 1;
8066                 *tx_en = 1;
8067         } else {
8068                 *rx_en = 0;
8069                 *tx_en = 0;
8070         }
8071 }
8072
8073 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8074                                 u32 rx_en, u32 tx_en)
8075 {
8076         struct hclge_vport *vport = hclge_get_vport(handle);
8077         struct hclge_dev *hdev = vport->back;
8078         struct phy_device *phydev = hdev->hw.mac.phydev;
8079         u32 fc_autoneg;
8080
8081         fc_autoneg = hclge_get_autoneg(handle);
8082         if (auto_neg != fc_autoneg) {
8083                 dev_info(&hdev->pdev->dev,
8084                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8085                 return -EOPNOTSUPP;
8086         }
8087
8088         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8089                 dev_info(&hdev->pdev->dev,
8090                          "Priority flow control enabled. Cannot set link flow control.\n");
8091                 return -EOPNOTSUPP;
8092         }
8093
8094         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8095
8096         if (!fc_autoneg)
8097                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8098
8099         if (phydev)
8100                 return phy_start_aneg(phydev);
8101
8102         if (hdev->pdev->revision == 0x20)
8103                 return -EOPNOTSUPP;
8104
8105         return hclge_restart_autoneg(handle);
8106 }
8107
8108 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8109                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8110 {
8111         struct hclge_vport *vport = hclge_get_vport(handle);
8112         struct hclge_dev *hdev = vport->back;
8113
8114         if (speed)
8115                 *speed = hdev->hw.mac.speed;
8116         if (duplex)
8117                 *duplex = hdev->hw.mac.duplex;
8118         if (auto_neg)
8119                 *auto_neg = hdev->hw.mac.autoneg;
8120 }
8121
8122 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8123                                  u8 *module_type)
8124 {
8125         struct hclge_vport *vport = hclge_get_vport(handle);
8126         struct hclge_dev *hdev = vport->back;
8127
8128         if (media_type)
8129                 *media_type = hdev->hw.mac.media_type;
8130
8131         if (module_type)
8132                 *module_type = hdev->hw.mac.module_type;
8133 }
8134
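/* Read the MDI/MDI-X control and status from the PHY's MDIX page, then
 * switch the PHY back to the copper page before reporting the result.
 */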
8135 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8136                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8137 {
8138         struct hclge_vport *vport = hclge_get_vport(handle);
8139         struct hclge_dev *hdev = vport->back;
8140         struct phy_device *phydev = hdev->hw.mac.phydev;
8141         int mdix_ctrl, mdix, is_resolved;
8142         unsigned int retval;
8143
8144         if (!phydev) {
8145                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8146                 *tp_mdix = ETH_TP_MDI_INVALID;
8147                 return;
8148         }
8149
8150         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8151
8152         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8153         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8154                                     HCLGE_PHY_MDIX_CTRL_S);
8155
8156         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8157         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8158         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8159
8160         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8161
8162         switch (mdix_ctrl) {
8163         case 0x0:
8164                 *tp_mdix_ctrl = ETH_TP_MDI;
8165                 break;
8166         case 0x1:
8167                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8168                 break;
8169         case 0x3:
8170                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8171                 break;
8172         default:
8173                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8174                 break;
8175         }
8176
8177         if (!is_resolved)
8178                 *tp_mdix = ETH_TP_MDI_INVALID;
8179         else if (mdix)
8180                 *tp_mdix = ETH_TP_MDI_X;
8181         else
8182                 *tp_mdix = ETH_TP_MDI;
8183 }
8184
8185 static void hclge_info_show(struct hclge_dev *hdev)
8186 {
8187         struct device *dev = &hdev->pdev->dev;
8188
8189         dev_info(dev, "PF info begin:\n");
8190
8191         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8192         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8193         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8194         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8195         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8196         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8197         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8198         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8199         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8200         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8201         dev_info(dev, "This is %s PF\n",
8202                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8203         dev_info(dev, "DCB %s\n",
8204                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8205         dev_info(dev, "MQPRIO %s\n",
8206                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8207
8208         dev_info(dev, "PF info end.\n");
8209 }
8210
8211 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8212                                           struct hclge_vport *vport)
8213 {
8214         struct hnae3_client *client = vport->nic.client;
8215         struct hclge_dev *hdev = ae_dev->priv;
8216         int ret;
8217
8218         ret = client->ops->init_instance(&vport->nic);
8219         if (ret)
8220                 return ret;
8221
8222         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8223         hnae3_set_client_init_flag(client, ae_dev, 1);
8224
8225         /* Enable nic hw error interrupts */
8226         ret = hclge_config_nic_hw_error(hdev, true);
8227         if (ret)
8228                 dev_err(&ae_dev->pdev->dev,
8229                         "fail(%d) to enable hw error interrupts\n", ret);
8230
8231         if (netif_msg_drv(&hdev->vport->nic))
8232                 hclge_info_show(hdev);
8233
8234         return ret;
8235 }
8236
8237 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8238                                            struct hclge_vport *vport)
8239 {
8240         struct hnae3_client *client = vport->roce.client;
8241         struct hclge_dev *hdev = ae_dev->priv;
8242         int ret;
8243
8244         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8245             !hdev->nic_client)
8246                 return 0;
8247
8248         client = hdev->roce_client;
8249         ret = hclge_init_roce_base_info(vport);
8250         if (ret)
8251                 return ret;
8252
8253         ret = client->ops->init_instance(&vport->roce);
8254         if (ret)
8255                 return ret;
8256
8257         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8258         hnae3_set_client_init_flag(client, ae_dev, 1);
8259
8260         return 0;
8261 }
8262
8263 static int hclge_init_client_instance(struct hnae3_client *client,
8264                                       struct hnae3_ae_dev *ae_dev)
8265 {
8266         struct hclge_dev *hdev = ae_dev->priv;
8267         struct hclge_vport *vport;
8268         int i, ret;
8269
8270         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8271                 vport = &hdev->vport[i];
8272
8273                 switch (client->type) {
8274                 case HNAE3_CLIENT_KNIC:
8275
8276                         hdev->nic_client = client;
8277                         vport->nic.client = client;
8278                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8279                         if (ret)
8280                                 goto clear_nic;
8281
8282                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8283                         if (ret)
8284                                 goto clear_roce;
8285
8286                         break;
8287                 case HNAE3_CLIENT_ROCE:
8288                         if (hnae3_dev_roce_supported(hdev)) {
8289                                 hdev->roce_client = client;
8290                                 vport->roce.client = client;
8291                         }
8292
8293                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8294                         if (ret)
8295                                 goto clear_roce;
8296
8297                         break;
8298                 default:
8299                         return -EINVAL;
8300                 }
8301         }
8302
8303         /* Enable roce ras interrupts */
8304         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8305         if (ret)
8306                 dev_err(&ae_dev->pdev->dev,
8307                         "fail(%d) to enable roce ras interrupts\n", ret);
8308
8309         return ret;
8310
8311 clear_nic:
8312         hdev->nic_client = NULL;
8313         vport->nic.client = NULL;
8314         return ret;
8315 clear_roce:
8316         hdev->roce_client = NULL;
8317         vport->roce.client = NULL;
8318         return ret;
8319 }
8320
8321 static void hclge_uninit_client_instance(struct hnae3_client *client,
8322                                          struct hnae3_ae_dev *ae_dev)
8323 {
8324         struct hclge_dev *hdev = ae_dev->priv;
8325         struct hclge_vport *vport;
8326         int i;
8327
8328         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8329                 vport = &hdev->vport[i];
8330                 if (hdev->roce_client) {
8331                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8332                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8333                                                                 0);
8334                         hdev->roce_client = NULL;
8335                         vport->roce.client = NULL;
8336                 }
8337                 if (client->type == HNAE3_CLIENT_ROCE)
8338                         return;
8339                 if (hdev->nic_client && client->ops->uninit_instance) {
8340                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8341                         client->ops->uninit_instance(&vport->nic, 0);
8342                         hdev->nic_client = NULL;
8343                         vport->nic.client = NULL;
8344                 }
8345         }
8346 }
8347
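/* Bring up the PCI function: enable the device, set a 64-bit DMA mask
 * (falling back to 32-bit), claim the regions and map BAR2 as the register
 * space, then record how many VFs the device offers.
 */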
8348 static int hclge_pci_init(struct hclge_dev *hdev)
8349 {
8350         struct pci_dev *pdev = hdev->pdev;
8351         struct hclge_hw *hw;
8352         int ret;
8353
8354         ret = pci_enable_device(pdev);
8355         if (ret) {
8356                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8357                 return ret;
8358         }
8359
8360         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8361         if (ret) {
8362                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8363                 if (ret) {
8364                         dev_err(&pdev->dev,
8365                                 "can't set consistent PCI DMA\n");
8366                         goto err_disable_device;
8367                 }
8368                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8369         }
8370
8371         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8372         if (ret) {
8373                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8374                 goto err_disable_device;
8375         }
8376
8377         pci_set_master(pdev);
8378         hw = &hdev->hw;
8379         hw->io_base = pcim_iomap(pdev, 2, 0);
8380         if (!hw->io_base) {
8381                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8382                 ret = -ENOMEM;
8383                 goto err_clr_master;
8384         }
8385
8386         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8387
8388         return 0;
8389 err_clr_master:
8390         pci_clear_master(pdev);
8391         pci_release_regions(pdev);
8392 err_disable_device:
8393         pci_disable_device(pdev);
8394
8395         return ret;
8396 }
8397
8398 static void hclge_pci_uninit(struct hclge_dev *hdev)
8399 {
8400         struct pci_dev *pdev = hdev->pdev;
8401
8402         pcim_iounmap(pdev, hdev->hw.io_base);
8403         pci_free_irq_vectors(pdev);
8404         pci_clear_master(pdev);
8405         pci_release_mem_regions(pdev);
8406         pci_disable_device(pdev);
8407 }
8408
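/* Initial driver state: service initialized, device down, no reset or
 * mailbox work pending.
 */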
8409 static void hclge_state_init(struct hclge_dev *hdev)
8410 {
8411         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8412         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8413         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8414         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8415         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8416         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8417 }
8418
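/* Mark the device as going down/removed and synchronously stop the service
 * and reset timers and all service tasks.
 */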
8419 static void hclge_state_uninit(struct hclge_dev *hdev)
8420 {
8421         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8422         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8423
8424         if (hdev->service_timer.function)
8425                 del_timer_sync(&hdev->service_timer);
8426         if (hdev->reset_timer.function)
8427                 del_timer_sync(&hdev->reset_timer);
8428         if (hdev->service_task.func)
8429                 cancel_work_sync(&hdev->service_task);
8430         if (hdev->rst_service_task.func)
8431                 cancel_work_sync(&hdev->rst_service_task);
8432         if (hdev->mbx_service_task.func)
8433                 cancel_work_sync(&hdev->mbx_service_task);
8434 }
8435
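/* Before an FLR, request a function-level reset and poll for up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds for the reset path
 * to bring the function down.
 */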
8436 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8437 {
8438 #define HCLGE_FLR_WAIT_MS       100
8439 #define HCLGE_FLR_WAIT_CNT      50
8440         struct hclge_dev *hdev = ae_dev->priv;
8441         int cnt = 0;
8442
8443         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8444         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8445         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8446         hclge_reset_event(hdev->pdev, NULL);
8447
8448         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8449                cnt++ < HCLGE_FLR_WAIT_CNT)
8450                 msleep(HCLGE_FLR_WAIT_MS);
8451
8452         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8453                 dev_err(&hdev->pdev->dev,
8454                         "flr wait down timeout: %d\n", cnt);
8455 }
8456
8457 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8458 {
8459         struct hclge_dev *hdev = ae_dev->priv;
8460
8461         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8462 }
8463
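/* Ask the firmware to clear the FUNC_RST_ING state of every VF so that no
 * VF is left marked as resetting.
 */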
8464 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8465 {
8466         u16 i;
8467
8468         for (i = 0; i < hdev->num_alloc_vport; i++) {
8469                 struct hclge_vport *vport = &hdev->vport[i];
8470                 int ret;
8471
8472                 /* Send cmd to clear VF's FUNC_RST_ING */
8473                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8474                 if (ret)
8475                         dev_warn(&hdev->pdev->dev,
8476                                  "clear vf(%d) rst failed %d!\n",
8477                                  vport->vport_id, ret);
8478         }
8479 }
8480
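/* Main PF initialization: set up PCI, command queue, MSI and vport
 * resources, configure MAC, VLAN, TM, RSS and the flow director, arm the
 * service/reset machinery and handle any hardware errors already latched.
 */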
8481 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8482 {
8483         struct pci_dev *pdev = ae_dev->pdev;
8484         struct hclge_dev *hdev;
8485         int ret;
8486
8487         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8488         if (!hdev) {
8489                 ret = -ENOMEM;
8490                 goto out;
8491         }
8492
8493         hdev->pdev = pdev;
8494         hdev->ae_dev = ae_dev;
8495         hdev->reset_type = HNAE3_NONE_RESET;
8496         hdev->reset_level = HNAE3_FUNC_RESET;
8497         ae_dev->priv = hdev;
8498         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8499
8500         mutex_init(&hdev->vport_lock);
8501         mutex_init(&hdev->vport_cfg_mutex);
8502         spin_lock_init(&hdev->fd_rule_lock);
8503
8504         ret = hclge_pci_init(hdev);
8505         if (ret) {
8506                 dev_err(&pdev->dev, "PCI init failed\n");
8507                 goto out;
8508         }
8509
8510         /* Firmware command queue initialization */
8511         ret = hclge_cmd_queue_init(hdev);
8512         if (ret) {
8513                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8514                 goto err_pci_uninit;
8515         }
8516
8517         /* Firmware command initialization */
8518         ret = hclge_cmd_init(hdev);
8519         if (ret)
8520                 goto err_cmd_uninit;
8521
8522         ret = hclge_get_cap(hdev);
8523         if (ret) {
8524                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8525                         ret);
8526                 goto err_cmd_uninit;
8527         }
8528
8529         ret = hclge_configure(hdev);
8530         if (ret) {
8531                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8532                 goto err_cmd_uninit;
8533         }
8534
8535         ret = hclge_init_msi(hdev);
8536         if (ret) {
8537                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8538                 goto err_cmd_uninit;
8539         }
8540
8541         ret = hclge_misc_irq_init(hdev);
8542         if (ret) {
8543                 dev_err(&pdev->dev,
8544                         "Misc IRQ(vector0) init error, ret = %d.\n",
8545                         ret);
8546                 goto err_msi_uninit;
8547         }
8548
8549         ret = hclge_alloc_tqps(hdev);
8550         if (ret) {
8551                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8552                 goto err_msi_irq_uninit;
8553         }
8554
8555         ret = hclge_alloc_vport(hdev);
8556         if (ret) {
8557                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8558                 goto err_msi_irq_uninit;
8559         }
8560
8561         ret = hclge_map_tqp(hdev);
8562         if (ret) {
8563                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8564                 goto err_msi_irq_uninit;
8565         }
8566
8567         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8568                 ret = hclge_mac_mdio_config(hdev);
8569                 if (ret) {
8570                         dev_err(&hdev->pdev->dev,
8571                                 "mdio config fail ret=%d\n", ret);
8572                         goto err_msi_irq_uninit;
8573                 }
8574         }
8575
8576         ret = hclge_init_umv_space(hdev);
8577         if (ret) {
8578                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8579                 goto err_mdiobus_unreg;
8580         }
8581
8582         ret = hclge_mac_init(hdev);
8583         if (ret) {
8584                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8585                 goto err_mdiobus_unreg;
8586         }
8587
8588         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8589         if (ret) {
8590                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8591                 goto err_mdiobus_unreg;
8592         }
8593
8594         ret = hclge_config_gro(hdev, true);
8595         if (ret)
8596                 goto err_mdiobus_unreg;
8597
8598         ret = hclge_init_vlan_config(hdev);
8599         if (ret) {
8600                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8601                 goto err_mdiobus_unreg;
8602         }
8603
8604         ret = hclge_tm_schd_init(hdev);
8605         if (ret) {
8606                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8607                 goto err_mdiobus_unreg;
8608         }
8609
8610         hclge_rss_init_cfg(hdev);
8611         ret = hclge_rss_init_hw(hdev);
8612         if (ret) {
8613                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8614                 goto err_mdiobus_unreg;
8615         }
8616
8617         ret = init_mgr_tbl(hdev);
8618         if (ret) {
8619                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8620                 goto err_mdiobus_unreg;
8621         }
8622
8623         ret = hclge_init_fd_config(hdev);
8624         if (ret) {
8625                 dev_err(&pdev->dev,
8626                         "fd table init fail, ret=%d\n", ret);
8627                 goto err_mdiobus_unreg;
8628         }
8629
8630         INIT_KFIFO(hdev->mac_tnl_log);
8631
8632         hclge_dcb_ops_set(hdev);
8633
8634         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8635         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8636         INIT_WORK(&hdev->service_task, hclge_service_task);
8637         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8638         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8639
8640         hclge_clear_all_event_cause(hdev);
8641         hclge_clear_resetting_state(hdev);
8642
8643         /* Log and clear the hw errors that have already occurred */
8644         hclge_handle_all_hns_hw_errors(ae_dev);
8645
8646         /* Request a delayed reset for error recovery: an immediate global
8647          * reset on one PF may disturb the pending initialization of other PFs.
8648          */
8649         if (ae_dev->hw_err_reset_req) {
8650                 enum hnae3_reset_type reset_level;
8651
8652                 reset_level = hclge_get_reset_level(ae_dev,
8653                                                     &ae_dev->hw_err_reset_req);
8654                 hclge_set_def_reset_request(ae_dev, reset_level);
8655                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8656         }
8657
8658         /* Enable MISC vector(vector0) */
8659         hclge_enable_vector(&hdev->misc_vector, true);
8660
8661         hclge_state_init(hdev);
8662         hdev->last_reset_time = jiffies;
8663
8664         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8665         return 0;
8666
8667 err_mdiobus_unreg:
8668         if (hdev->hw.mac.phydev)
8669                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8670 err_msi_irq_uninit:
8671         hclge_misc_irq_uninit(hdev);
8672 err_msi_uninit:
8673         pci_free_irq_vectors(pdev);
8674 err_cmd_uninit:
8675         hclge_cmd_uninit(hdev);
8676 err_pci_uninit:
8677         pcim_iounmap(pdev, hdev->hw.io_base);
8678         pci_clear_master(pdev);
8679         pci_release_regions(pdev);
8680         pci_disable_device(pdev);
8681 out:
8682         return ret;
8683 }
8684
8685 static void hclge_stats_clear(struct hclge_dev *hdev)
8686 {
8687         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8688 }
8689
8690 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8691 {
8692         struct hclge_vport *vport = hdev->vport;
8693         int i;
8694
8695         for (i = 0; i < hdev->num_alloc_vport; i++) {
8696                 hclge_vport_stop(vport);
8697                 vport++;
8698         }
8699 }
8700
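/* Restore hardware state after a reset: the command queue, TQP mapping,
 * MAC, VLAN, TM, RSS and flow director are reconfigured and the hardware
 * error interrupts re-enabled; no new resources are allocated here.
 */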
8701 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8702 {
8703         struct hclge_dev *hdev = ae_dev->priv;
8704         struct pci_dev *pdev = ae_dev->pdev;
8705         int ret;
8706
8707         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8708
8709         hclge_stats_clear(hdev);
8710         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8711         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8712
8713         ret = hclge_cmd_init(hdev);
8714         if (ret) {
8715                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8716                 return ret;
8717         }
8718
8719         ret = hclge_map_tqp(hdev);
8720         if (ret) {
8721                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8722                 return ret;
8723         }
8724
8725         hclge_reset_umv_space(hdev);
8726
8727         ret = hclge_mac_init(hdev);
8728         if (ret) {
8729                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8730                 return ret;
8731         }
8732
8733         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8734         if (ret) {
8735                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8736                 return ret;
8737         }
8738
8739         ret = hclge_config_gro(hdev, true);
8740         if (ret)
8741                 return ret;
8742
8743         ret = hclge_init_vlan_config(hdev);
8744         if (ret) {
8745                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8746                 return ret;
8747         }
8748
8749         ret = hclge_tm_init_hw(hdev, true);
8750         if (ret) {
8751                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8752                 return ret;
8753         }
8754
8755         ret = hclge_rss_init_hw(hdev);
8756         if (ret) {
8757                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8758                 return ret;
8759         }
8760
8761         ret = hclge_init_fd_config(hdev);
8762         if (ret) {
8763                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8764                 return ret;
8765         }
8766
8767         /* Re-enable the hw error interrupts because
8768          * the interrupts get disabled on global reset.
8769          */
8770         ret = hclge_config_nic_hw_error(hdev, true);
8771         if (ret) {
8772                 dev_err(&pdev->dev,
8773                         "fail(%d) to re-enable NIC hw error interrupts\n",
8774                         ret);
8775                 return ret;
8776         }
8777
8778         if (hdev->roce_client) {
8779                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8780                 if (ret) {
8781                         dev_err(&pdev->dev,
8782                                 "fail(%d) to re-enable roce ras interrupts\n",
8783                                 ret);
8784                         return ret;
8785                 }
8786         }
8787
8788         hclge_reset_vport_state(hdev);
8789
8790         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8791                  HCLGE_DRIVER_NAME);
8792
8793         return 0;
8794 }
8795
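/* Reverse of hclge_init_ae_dev(): stop timers and tasks, disable the misc
 * vector and hardware error interrupts, then release command queue, IRQ,
 * PCI and table resources.
 */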
8796 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8797 {
8798         struct hclge_dev *hdev = ae_dev->priv;
8799         struct hclge_mac *mac = &hdev->hw.mac;
8800
8801         hclge_state_uninit(hdev);
8802
8803         if (mac->phydev)
8804                 mdiobus_unregister(mac->mdio_bus);
8805
8806         hclge_uninit_umv_space(hdev);
8807
8808         /* Disable MISC vector(vector0) */
8809         hclge_enable_vector(&hdev->misc_vector, false);
8810         synchronize_irq(hdev->misc_vector.vector_irq);
8811
8812         /* Disable all hw interrupts */
8813         hclge_config_mac_tnl_int(hdev, false);
8814         hclge_config_nic_hw_error(hdev, false);
8815         hclge_config_rocee_ras_interrupt(hdev, false);
8816
8817         hclge_cmd_uninit(hdev);
8818         hclge_misc_irq_uninit(hdev);
8819         hclge_pci_uninit(hdev);
8820         mutex_destroy(&hdev->vport_lock);
8821         hclge_uninit_vport_mac_table(hdev);
8822         hclge_uninit_vport_vlan_table(hdev);
8823         mutex_destroy(&hdev->vport_cfg_mutex);
8824         ae_dev->priv = NULL;
8825 }
8826
8827 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8828 {
8829         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8830         struct hclge_vport *vport = hclge_get_vport(handle);
8831         struct hclge_dev *hdev = vport->back;
8832
8833         return min_t(u32, hdev->rss_size_max,
8834                      vport->alloc_tqps / kinfo->num_tc);
8835 }
8836
8837 static void hclge_get_channels(struct hnae3_handle *handle,
8838                                struct ethtool_channels *ch)
8839 {
8840         ch->max_combined = hclge_get_max_channels(handle);
8841         ch->other_count = 1;
8842         ch->max_other = 1;
8843         ch->combined_count = handle->kinfo.rss_size;
8844 }
8845
8846 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8847                                         u16 *alloc_tqps, u16 *max_rss_size)
8848 {
8849         struct hclge_vport *vport = hclge_get_vport(handle);
8850         struct hclge_dev *hdev = vport->back;
8851
8852         *alloc_tqps = vport->alloc_tqps;
8853         *max_rss_size = hdev->rss_size_max;
8854 }
8855
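/* Change the number of queues used per TC: update the vport TQP mapping,
 * rewrite the RSS TC mode for the new rss_size and, unless the user has
 * configured the RSS indirection table explicitly, respread it over the
 * new queue range.
 */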
8856 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8857                               bool rxfh_configured)
8858 {
8859         struct hclge_vport *vport = hclge_get_vport(handle);
8860         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8861         struct hclge_dev *hdev = vport->back;
8862         int cur_rss_size = kinfo->rss_size;
8863         int cur_tqps = kinfo->num_tqps;
8864         u16 tc_offset[HCLGE_MAX_TC_NUM];
8865         u16 tc_valid[HCLGE_MAX_TC_NUM];
8866         u16 tc_size[HCLGE_MAX_TC_NUM];
8867         u16 roundup_size;
8868         u32 *rss_indir;
8869         unsigned int i;
8870         int ret;
8871
8872         kinfo->req_rss_size = new_tqps_num;
8873
8874         ret = hclge_tm_vport_map_update(hdev);
8875         if (ret) {
8876                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8877                 return ret;
8878         }
8879
8880         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8881         roundup_size = ilog2(roundup_size);
8882         /* Set the RSS TC mode according to the new RSS size */
8883         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8884                 tc_valid[i] = 0;
8885
8886                 if (!(hdev->hw_tc_map & BIT(i)))
8887                         continue;
8888
8889                 tc_valid[i] = 1;
8890                 tc_size[i] = roundup_size;
8891                 tc_offset[i] = kinfo->rss_size * i;
8892         }
8893         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8894         if (ret)
8895                 return ret;
8896
8897         /* RSS indirection table has been configured by user */
8898         if (rxfh_configured)
8899                 goto out;
8900
8901         /* Reinitialize the RSS indirection table according to the new RSS size */
8902         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8903         if (!rss_indir)
8904                 return -ENOMEM;
8905
8906         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8907                 rss_indir[i] = i % kinfo->rss_size;
8908
8909         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8910         if (ret)
8911                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8912                         ret);
8913
8914         kfree(rss_indir);
8915
8916 out:
8917         if (!ret)
8918                 dev_info(&hdev->pdev->dev,
8919                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8920                          cur_rss_size, kinfo->rss_size,
8921                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8922
8923         return ret;
8924 }
8925
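/* Query from firmware how many 32-bit and 64-bit registers the register
 * dump contains.
 */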
8926 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8927                               u32 *regs_num_64_bit)
8928 {
8929         struct hclge_desc desc;
8930         u32 total_num;
8931         int ret;
8932
8933         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8934         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8935         if (ret) {
8936                 dev_err(&hdev->pdev->dev,
8937                         "Query register number cmd failed, ret = %d.\n", ret);
8938                 return ret;
8939         }
8940
8941         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8942         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8943
8944         total_num = *regs_num_32_bit + *regs_num_64_bit;
8945         if (!total_num)
8946                 return -EINVAL;
8947
8948         return 0;
8949 }
8950
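/* Read regs_num 32-bit register values from firmware into 'data'. The
 * first descriptor carries HCLGE_32_BIT_DESC_NODATA_LEN fewer values than
 * the rest because part of it is taken by the command header.
 */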
8951 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8952                                  void *data)
8953 {
8954 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8955 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8956
8957         struct hclge_desc *desc;
8958         u32 *reg_val = data;
8959         __le32 *desc_data;
8960         int nodata_num;
8961         int cmd_num;
8962         int i, k, n;
8963         int ret;
8964
8965         if (regs_num == 0)
8966                 return 0;
8967
8968         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8969         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8970                                HCLGE_32_BIT_REG_RTN_DATANUM);
8971         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8972         if (!desc)
8973                 return -ENOMEM;
8974
8975         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8976         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8977         if (ret) {
8978                 dev_err(&hdev->pdev->dev,
8979                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8980                 kfree(desc);
8981                 return ret;
8982         }
8983
8984         for (i = 0; i < cmd_num; i++) {
8985                 if (i == 0) {
8986                         desc_data = (__le32 *)(&desc[i].data[0]);
8987                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8988                 } else {
8989                         desc_data = (__le32 *)(&desc[i]);
8990                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8991                 }
8992                 for (k = 0; k < n; k++) {
8993                         *reg_val++ = le32_to_cpu(*desc_data++);
8994
8995                         regs_num--;
8996                         if (!regs_num)
8997                                 break;
8998                 }
8999         }
9000
9001         kfree(desc);
9002         return 0;
9003 }
9004
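/* Same as hclge_get_32_bit_regs(), but for the 64-bit register block: the
 * first descriptor holds HCLGE_64_BIT_DESC_NODATA_LEN fewer values than
 * the rest.
 */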
9005 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9006                                  void *data)
9007 {
9008 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9009 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9010
9011         struct hclge_desc *desc;
9012         u64 *reg_val = data;
9013         __le64 *desc_data;
9014         int nodata_len;
9015         int cmd_num;
9016         int i, k, n;
9017         int ret;
9018
9019         if (regs_num == 0)
9020                 return 0;
9021
9022         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9023         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9024                                HCLGE_64_BIT_REG_RTN_DATANUM);
9025         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9026         if (!desc)
9027                 return -ENOMEM;
9028
9029         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9030         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9031         if (ret) {
9032                 dev_err(&hdev->pdev->dev,
9033                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9034                 kfree(desc);
9035                 return ret;
9036         }
9037
9038         for (i = 0; i < cmd_num; i++) {
9039                 if (i == 0) {
9040                         desc_data = (__le64 *)(&desc[i].data[0]);
9041                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9042                 } else {
9043                         desc_data = (__le64 *)(&desc[i]);
9044                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9045                 }
9046                 for (k = 0; k < n; k++) {
9047                         *reg_val++ = le64_to_cpu(*desc_data++);
9048
9049                         regs_num--;
9050                         if (!regs_num)
9051                                 break;
9052                 }
9053         }
9054
9055         kfree(desc);
9056         return 0;
9057 }
9058
9059 #define MAX_SEPARATE_NUM        4
9060 #define SEPARATOR_VALUE         0xFFFFFFFF
9061 #define REG_NUM_PER_LINE        4
9062 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9063
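/* Size of the ethtool register dump: the directly read CMDQ, common,
 * per-ring and per-TQP-vector register blocks (each padded to
 * REG_LEN_PER_LINE), plus the 32-bit and 64-bit register blocks reported
 * by firmware.
 */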
9064 static int hclge_get_regs_len(struct hnae3_handle *handle)
9065 {
9066         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9067         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9068         struct hclge_vport *vport = hclge_get_vport(handle);
9069         struct hclge_dev *hdev = vport->back;
9070         u32 regs_num_32_bit, regs_num_64_bit;
9071         int ret;
9072
9073         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9074         if (ret) {
9075                 dev_err(&hdev->pdev->dev,
9076                         "Get register number failed, ret = %d.\n", ret);
9077                 return -EOPNOTSUPP;
9078         }
9079
9080         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9081         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9082         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9083         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9084
9085         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9086                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9087                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9088 }
9089
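/* Fill the ethtool register dump: values read directly from the PF BAR
 * (CMDQ, common, per-ring and per-vector interrupt registers, each block
 * padded with SEPARATOR_VALUE), followed by the 32-bit and 64-bit register
 * blocks queried from firmware.
 */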
9090 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9091                            void *data)
9092 {
9093         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9094         struct hclge_vport *vport = hclge_get_vport(handle);
9095         struct hclge_dev *hdev = vport->back;
9096         u32 regs_num_32_bit, regs_num_64_bit;
9097         int i, j, reg_um, separator_num;
9098         u32 *reg = data;
9099         int ret;
9100
9101         *version = hdev->fw_version;
9102
9103         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9104         if (ret) {
9105                 dev_err(&hdev->pdev->dev,
9106                         "Get register number failed, ret = %d.\n", ret);
9107                 return;
9108         }
9109
9110         /* fetch per-PF register values from the PF PCIe register space */
9111         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9112         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9113         for (i = 0; i < reg_um; i++)
9114                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9115         for (i = 0; i < separator_num; i++)
9116                 *reg++ = SEPARATOR_VALUE;
9117
9118         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9119         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9120         for (i = 0; i < reg_um; i++)
9121                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9122         for (i = 0; i < separator_num; i++)
9123                 *reg++ = SEPARATOR_VALUE;
9124
9125         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9126         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9127         for (j = 0; j < kinfo->num_tqps; j++) {
9128                 for (i = 0; i < reg_um; i++)
9129                         *reg++ = hclge_read_dev(&hdev->hw,
9130                                                 ring_reg_addr_list[i] +
9131                                                 0x200 * j);
9132                 for (i = 0; i < separator_num; i++)
9133                         *reg++ = SEPARATOR_VALUE;
9134         }
9135
9136         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9137         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9138         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9139                 for (i = 0; i < reg_um; i++)
9140                         *reg++ = hclge_read_dev(&hdev->hw,
9141                                                 tqp_intr_reg_addr_list[i] +
9142                                                 4 * j);
9143                 for (i = 0; i < separator_num; i++)
9144                         *reg++ = SEPARATOR_VALUE;
9145         }
9146
9147         /* fetch PF common register values from firmware */
9148         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9149         if (ret) {
9150                 dev_err(&hdev->pdev->dev,
9151                         "Get 32 bit register failed, ret = %d.\n", ret);
9152                 return;
9153         }
9154
9155         reg += regs_num_32_bit;
9156         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9157         if (ret)
9158                 dev_err(&hdev->pdev->dev,
9159                         "Get 64 bit register failed, ret = %d.\n", ret);
9160 }
9161
9162 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9163 {
9164         struct hclge_set_led_state_cmd *req;
9165         struct hclge_desc desc;
9166         int ret;
9167
9168         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9169
9170         req = (struct hclge_set_led_state_cmd *)desc.data;
9171         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9172                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9173
9174         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9175         if (ret)
9176                 dev_err(&hdev->pdev->dev,
9177                         "Send set led state cmd error, ret =%d\n", ret);
9178
9179         return ret;
9180 }
9181
9182 enum hclge_led_status {
9183         HCLGE_LED_OFF,
9184         HCLGE_LED_ON,
9185         HCLGE_LED_NO_CHANGE = 0xFF,
9186 };
9187
9188 static int hclge_set_led_id(struct hnae3_handle *handle,
9189                             enum ethtool_phys_id_state status)
9190 {
9191         struct hclge_vport *vport = hclge_get_vport(handle);
9192         struct hclge_dev *hdev = vport->back;
9193
9194         switch (status) {
9195         case ETHTOOL_ID_ACTIVE:
9196                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9197         case ETHTOOL_ID_INACTIVE:
9198                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9199         default:
9200                 return -EINVAL;
9201         }
9202 }
9203
9204 static void hclge_get_link_mode(struct hnae3_handle *handle,
9205                                 unsigned long *supported,
9206                                 unsigned long *advertising)
9207 {
9208         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9209         struct hclge_vport *vport = hclge_get_vport(handle);
9210         struct hclge_dev *hdev = vport->back;
9211         unsigned int idx = 0;
9212
9213         for (; idx < size; idx++) {
9214                 supported[idx] = hdev->hw.mac.supported[idx];
9215                 advertising[idx] = hdev->hw.mac.advertising[idx];
9216         }
9217 }
9218
9219 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9220 {
9221         struct hclge_vport *vport = hclge_get_vport(handle);
9222         struct hclge_dev *hdev = vport->back;
9223
9224         return hclge_config_gro(hdev, enable);
9225 }
9226
9227 static const struct hnae3_ae_ops hclge_ops = {
9228         .init_ae_dev = hclge_init_ae_dev,
9229         .uninit_ae_dev = hclge_uninit_ae_dev,
9230         .flr_prepare = hclge_flr_prepare,
9231         .flr_done = hclge_flr_done,
9232         .init_client_instance = hclge_init_client_instance,
9233         .uninit_client_instance = hclge_uninit_client_instance,
9234         .map_ring_to_vector = hclge_map_ring_to_vector,
9235         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9236         .get_vector = hclge_get_vector,
9237         .put_vector = hclge_put_vector,
9238         .set_promisc_mode = hclge_set_promisc_mode,
9239         .set_loopback = hclge_set_loopback,
9240         .start = hclge_ae_start,
9241         .stop = hclge_ae_stop,
9242         .client_start = hclge_client_start,
9243         .client_stop = hclge_client_stop,
9244         .get_status = hclge_get_status,
9245         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9246         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9247         .get_media_type = hclge_get_media_type,
9248         .check_port_speed = hclge_check_port_speed,
9249         .get_fec = hclge_get_fec,
9250         .set_fec = hclge_set_fec,
9251         .get_rss_key_size = hclge_get_rss_key_size,
9252         .get_rss_indir_size = hclge_get_rss_indir_size,
9253         .get_rss = hclge_get_rss,
9254         .set_rss = hclge_set_rss,
9255         .set_rss_tuple = hclge_set_rss_tuple,
9256         .get_rss_tuple = hclge_get_rss_tuple,
9257         .get_tc_size = hclge_get_tc_size,
9258         .get_mac_addr = hclge_get_mac_addr,
9259         .set_mac_addr = hclge_set_mac_addr,
9260         .do_ioctl = hclge_do_ioctl,
9261         .add_uc_addr = hclge_add_uc_addr,
9262         .rm_uc_addr = hclge_rm_uc_addr,
9263         .add_mc_addr = hclge_add_mc_addr,
9264         .rm_mc_addr = hclge_rm_mc_addr,
9265         .set_autoneg = hclge_set_autoneg,
9266         .get_autoneg = hclge_get_autoneg,
9267         .restart_autoneg = hclge_restart_autoneg,
9268         .get_pauseparam = hclge_get_pauseparam,
9269         .set_pauseparam = hclge_set_pauseparam,
9270         .set_mtu = hclge_set_mtu,
9271         .reset_queue = hclge_reset_tqp,
9272         .get_stats = hclge_get_stats,
9273         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9274         .update_stats = hclge_update_stats,
9275         .get_strings = hclge_get_strings,
9276         .get_sset_count = hclge_get_sset_count,
9277         .get_fw_version = hclge_get_fw_version,
9278         .get_mdix_mode = hclge_get_mdix_mode,
9279         .enable_vlan_filter = hclge_enable_vlan_filter,
9280         .set_vlan_filter = hclge_set_vlan_filter,
9281         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9282         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9283         .reset_event = hclge_reset_event,
9284         .get_reset_level = hclge_get_reset_level,
9285         .set_default_reset_request = hclge_set_def_reset_request,
9286         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9287         .set_channels = hclge_set_channels,
9288         .get_channels = hclge_get_channels,
9289         .get_regs_len = hclge_get_regs_len,
9290         .get_regs = hclge_get_regs,
9291         .set_led_id = hclge_set_led_id,
9292         .get_link_mode = hclge_get_link_mode,
9293         .add_fd_entry = hclge_add_fd_entry,
9294         .del_fd_entry = hclge_del_fd_entry,
9295         .del_all_fd_entries = hclge_del_all_fd_entries,
9296         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9297         .get_fd_rule_info = hclge_get_fd_rule_info,
9298         .get_fd_all_rules = hclge_get_all_rules,
9299         .restore_fd_rules = hclge_restore_fd_entries,
9300         .enable_fd = hclge_enable_fd,
9301         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9302         .dbg_run_cmd = hclge_dbg_run_cmd,
9303         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9304         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9305         .ae_dev_resetting = hclge_ae_dev_resetting,
9306         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9307         .set_gro_en = hclge_gro_en,
9308         .get_global_queue_id = hclge_covert_handle_qid_global,
9309         .set_timer_task = hclge_set_timer_task,
9310         .mac_connect_phy = hclge_mac_connect_phy,
9311         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9312         .restore_vlan_table = hclge_restore_vlan_table,
9313 };
9314
9315 static struct hnae3_ae_algo ae_algo = {
9316         .ops = &hclge_ops,
9317         .pdev_id_table = ae_algo_pci_tbl,
9318 };
9319
9320 static int hclge_init(void)
9321 {
9322         pr_info("%s is initializing\n", HCLGE_NAME);
9323
9324         hnae3_register_ae_algo(&ae_algo);
9325
9326         return 0;
9327 }
9328
9329 static void hclge_exit(void)
9330 {
9331         hnae3_unregister_ae_algo(&ae_algo);
9332 }
9333 module_init(hclge_init);
9334 module_exit(hclge_exit);
9335
9336 MODULE_LICENSE("GPL");
9337 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9338 MODULE_DESCRIPTION("HCLGE Driver");
9339 MODULE_VERSION(HCLGE_MOD_VERSION);