net: hns3: Add support to dump (using ethtool -d) PCIe regs in HNS3 PF driver
[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
21 #include "hclge_tm.h"
22 #include "hclge_err.h"
23 #include "hnae3.h"
24
25 #define HCLGE_NAME                      "hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
28
29 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
30 static int hclge_init_vlan_config(struct hclge_dev *hdev);
31 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
32 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
33                                u16 *allocated_size, bool is_alloc);
34
35 static struct hnae3_ae_algo ae_algo;
36
37 static const struct pci_device_id ae_algo_pci_tbl[] = {
38         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
39         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
40         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
45         /* required last entry */
46         {0, }
47 };
48
49 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
50
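/* Register address tables dumped through "ethtool -d" (see commit subject).
 * These appear to group the PF registers into four sets: command-queue
 * registers, common/misc vector registers, per-ring (TQP) registers and
 * TQP interrupt registers. The values at these addresses are read back and
 * returned to userspace when the user runs e.g. "ethtool -d eth0" (the
 * interface name here is only an example).
 */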
51 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
52                                          HCLGE_CMDQ_TX_ADDR_H_REG,
53                                          HCLGE_CMDQ_TX_DEPTH_REG,
54                                          HCLGE_CMDQ_TX_TAIL_REG,
55                                          HCLGE_CMDQ_TX_HEAD_REG,
56                                          HCLGE_CMDQ_RX_ADDR_L_REG,
57                                          HCLGE_CMDQ_RX_ADDR_H_REG,
58                                          HCLGE_CMDQ_RX_DEPTH_REG,
59                                          HCLGE_CMDQ_RX_TAIL_REG,
60                                          HCLGE_CMDQ_RX_HEAD_REG,
61                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
62                                          HCLGE_CMDQ_INTR_STS_REG,
63                                          HCLGE_CMDQ_INTR_EN_REG,
64                                          HCLGE_CMDQ_INTR_GEN_REG};
65
66 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
67                                            HCLGE_VECTOR0_OTER_EN_REG,
68                                            HCLGE_MISC_RESET_STS_REG,
69                                            HCLGE_MISC_VECTOR_INT_STS,
70                                            HCLGE_GLOBAL_RESET_REG,
71                                            HCLGE_FUN_RST_ING,
72                                            HCLGE_GRO_EN_REG};
73
74 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
75                                          HCLGE_RING_RX_ADDR_H_REG,
76                                          HCLGE_RING_RX_BD_NUM_REG,
77                                          HCLGE_RING_RX_BD_LENGTH_REG,
78                                          HCLGE_RING_RX_MERGE_EN_REG,
79                                          HCLGE_RING_RX_TAIL_REG,
80                                          HCLGE_RING_RX_HEAD_REG,
81                                          HCLGE_RING_RX_FBD_NUM_REG,
82                                          HCLGE_RING_RX_OFFSET_REG,
83                                          HCLGE_RING_RX_FBD_OFFSET_REG,
84                                          HCLGE_RING_RX_STASH_REG,
85                                          HCLGE_RING_RX_BD_ERR_REG,
86                                          HCLGE_RING_TX_ADDR_L_REG,
87                                          HCLGE_RING_TX_ADDR_H_REG,
88                                          HCLGE_RING_TX_BD_NUM_REG,
89                                          HCLGE_RING_TX_PRIORITY_REG,
90                                          HCLGE_RING_TX_TC_REG,
91                                          HCLGE_RING_TX_MERGE_EN_REG,
92                                          HCLGE_RING_TX_TAIL_REG,
93                                          HCLGE_RING_TX_HEAD_REG,
94                                          HCLGE_RING_TX_FBD_NUM_REG,
95                                          HCLGE_RING_TX_OFFSET_REG,
96                                          HCLGE_RING_TX_EBD_NUM_REG,
97                                          HCLGE_RING_TX_EBD_OFFSET_REG,
98                                          HCLGE_RING_TX_BD_ERR_REG,
99                                          HCLGE_RING_EN_REG};
100
101 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
102                                              HCLGE_TQP_INTR_GL0_REG,
103                                              HCLGE_TQP_INTR_GL1_REG,
104                                              HCLGE_TQP_INTR_GL2_REG,
105                                              HCLGE_TQP_INTR_RL_REG};
106
107 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
108         "App    Loopback test",
109         "Serdes serial Loopback test",
110         "Serdes parallel Loopback test",
111         "Phy    Loopback test"
112 };
113
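/* Mapping of ethtool stat names to their byte offset inside
 * struct hclge_mac_stats; the offsets are later dereferenced with
 * HCLGE_STATS_READ() when filling the ethtool stats buffer.
 */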
114 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
115         {"mac_tx_mac_pause_num",
116                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
117         {"mac_rx_mac_pause_num",
118                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
119         {"mac_tx_pfc_pri0_pkt_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
121         {"mac_tx_pfc_pri1_pkt_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
123         {"mac_tx_pfc_pri2_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
125         {"mac_tx_pfc_pri3_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
127         {"mac_tx_pfc_pri4_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
129         {"mac_tx_pfc_pri5_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
131         {"mac_tx_pfc_pri6_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
133         {"mac_tx_pfc_pri7_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
135         {"mac_rx_pfc_pri0_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
137         {"mac_rx_pfc_pri1_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
139         {"mac_rx_pfc_pri2_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
141         {"mac_rx_pfc_pri3_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
143         {"mac_rx_pfc_pri4_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
145         {"mac_rx_pfc_pri5_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
147         {"mac_rx_pfc_pri6_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
149         {"mac_rx_pfc_pri7_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
151         {"mac_tx_total_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
153         {"mac_tx_total_oct_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
155         {"mac_tx_good_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
157         {"mac_tx_bad_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
159         {"mac_tx_good_oct_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
161         {"mac_tx_bad_oct_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
163         {"mac_tx_uni_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
165         {"mac_tx_multi_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
167         {"mac_tx_broad_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
169         {"mac_tx_undersize_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
171         {"mac_tx_oversize_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
173         {"mac_tx_64_oct_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
175         {"mac_tx_65_127_oct_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
177         {"mac_tx_128_255_oct_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
179         {"mac_tx_256_511_oct_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
181         {"mac_tx_512_1023_oct_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
183         {"mac_tx_1024_1518_oct_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
185         {"mac_tx_1519_2047_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
187         {"mac_tx_2048_4095_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
189         {"mac_tx_4096_8191_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
191         {"mac_tx_8192_9216_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
193         {"mac_tx_9217_12287_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
195         {"mac_tx_12288_16383_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
197         {"mac_tx_1519_max_good_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
199         {"mac_tx_1519_max_bad_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
201         {"mac_rx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
203         {"mac_rx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
205         {"mac_rx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
207         {"mac_rx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
209         {"mac_rx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
211         {"mac_rx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
213         {"mac_rx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
215         {"mac_rx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
217         {"mac_rx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
219         {"mac_rx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
221         {"mac_rx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
223         {"mac_rx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
225         {"mac_rx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
227         {"mac_rx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
229         {"mac_rx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
231         {"mac_rx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
233         {"mac_rx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
235         {"mac_rx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
237         {"mac_rx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
239         {"mac_rx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
241         {"mac_rx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
243         {"mac_rx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
245         {"mac_rx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
247         {"mac_rx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
249         {"mac_rx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
251
252         {"mac_tx_fragment_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
254         {"mac_tx_undermin_pkt_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
256         {"mac_tx_jabber_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
258         {"mac_tx_err_all_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
260         {"mac_tx_from_app_good_pkt_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
262         {"mac_tx_from_app_bad_pkt_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
264         {"mac_rx_fragment_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
266         {"mac_rx_undermin_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
268         {"mac_rx_jabber_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
270         {"mac_rx_fcs_err_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
272         {"mac_rx_send_app_good_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
274         {"mac_rx_send_app_bad_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
276 };
277
278 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
279         {
280                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
281                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
282                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
283                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
284                 .i_port_bitmap = 0x1,
285         },
286 };
287
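/* Read the MAC statistics from firmware and accumulate them into
 * hdev->hw_stats.mac_stats. The query returns HCLGE_MAC_CMD_NUM chained
 * descriptors: only (HCLGE_RTN_DATA_NUM - 2) 64-bit counters are taken from
 * the data area of the first descriptor, while every following descriptor is
 * consumed as HCLGE_RTN_DATA_NUM raw 64-bit counters (presumably the
 * firmware reuses the whole descriptor for data there).
 */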
288 static int hclge_mac_update_stats(struct hclge_dev *hdev)
289 {
290 #define HCLGE_MAC_CMD_NUM 21
291 #define HCLGE_RTN_DATA_NUM 4
292
293         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
294         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
295         __le64 *desc_data;
296         int i, k, n;
297         int ret;
298
299         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
300         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
301         if (ret) {
302                 dev_err(&hdev->pdev->dev,
303                         "Get MAC pkt stats fail, status = %d.\n", ret);
304
305                 return ret;
306         }
307
308         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
309                 if (unlikely(i == 0)) {
310                         desc_data = (__le64 *)(&desc[i].data[0]);
311                         n = HCLGE_RTN_DATA_NUM - 2;
312                 } else {
313                         desc_data = (__le64 *)(&desc[i]);
314                         n = HCLGE_RTN_DATA_NUM;
315                 }
316                 for (k = 0; k < n; k++) {
317                         *data++ += le64_to_cpu(*desc_data);
318                         desc_data++;
319                 }
320         }
321
322         return 0;
323 }
324
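/* Query the per-queue RX/TX packet counters: one HCLGE_OPC_QUERY_RX_STATUS
 * or HCLGE_OPC_QUERY_TX_STATUS command is issued per TQP, and the returned
 * packet count is accumulated into the tqp_stats of that queue.
 */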
325 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
326 {
327         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
328         struct hclge_vport *vport = hclge_get_vport(handle);
329         struct hclge_dev *hdev = vport->back;
330         struct hnae3_queue *queue;
331         struct hclge_desc desc[1];
332         struct hclge_tqp *tqp;
333         int ret, i;
334
335         for (i = 0; i < kinfo->num_tqps; i++) {
336                 queue = handle->kinfo.tqp[i];
337                 tqp = container_of(queue, struct hclge_tqp, q);
338                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
339                 hclge_cmd_setup_basic_desc(&desc[0],
340                                            HCLGE_OPC_QUERY_RX_STATUS,
341                                            true);
342
343                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
344                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
345                 if (ret) {
346                         dev_err(&hdev->pdev->dev,
347                                 "Query tqp stat fail, status = %d, queue = %d\n",
348                                 ret, i);
349                         return ret;
350                 }
351                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
352                         le32_to_cpu(desc[0].data[1]);
353         }
354
355         for (i = 0; i < kinfo->num_tqps; i++) {
356                 queue = handle->kinfo.tqp[i];
357                 tqp = container_of(queue, struct hclge_tqp, q);
358                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
359                 hclge_cmd_setup_basic_desc(&desc[0],
360                                            HCLGE_OPC_QUERY_TX_STATUS,
361                                            true);
362
363                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
364                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
365                 if (ret) {
366                         dev_err(&hdev->pdev->dev,
367                                 "Query tqp stat fail, status = %d, queue = %d\n",
368                                 ret, i);
369                         return ret;
370                 }
371                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
372                         le32_to_cpu(desc[0].data[1]);
373         }
374
375         return 0;
376 }
377
378 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
379 {
380         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
381         struct hclge_tqp *tqp;
382         u64 *buff = data;
383         int i;
384
385         for (i = 0; i < kinfo->num_tqps; i++) {
386                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
387                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
388         }
389
390         for (i = 0; i < kinfo->num_tqps; i++) {
391                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
392                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
393         }
394
395         return buff;
396 }
397
398 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
399 {
400         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
401
402         return kinfo->num_tqps * 2;
403 }
404
405 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
406 {
407         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
408         u8 *buff = data;
409         int i = 0;
410
411         for (i = 0; i < kinfo->num_tqps; i++) {
412                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
413                         struct hclge_tqp, q);
414                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
415                          tqp->index);
416                 buff = buff + ETH_GSTRING_LEN;
417         }
418
419         for (i = 0; i < kinfo->num_tqps; i++) {
420                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
421                         struct hclge_tqp, q);
422                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
423                          tqp->index);
424                 buff = buff + ETH_GSTRING_LEN;
425         }
426
427         return buff;
428 }
429
430 static u64 *hclge_comm_get_stats(void *comm_stats,
431                                  const struct hclge_comm_stats_str strs[],
432                                  int size, u64 *data)
433 {
434         u64 *buf = data;
435         u32 i;
436
437         for (i = 0; i < size; i++)
438                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
439
440         return buf + size;
441 }
442
443 static u8 *hclge_comm_get_strings(u32 stringset,
444                                   const struct hclge_comm_stats_str strs[],
445                                   int size, u8 *data)
446 {
447         char *buff = (char *)data;
448         u32 i;
449
450         if (stringset != ETH_SS_STATS)
451                 return buff;
452
453         for (i = 0; i < size; i++) {
454                 snprintf(buff, ETH_GSTRING_LEN,
455                          strs[i].desc);
456                 buff = buff + ETH_GSTRING_LEN;
457         }
458
459         return (u8 *)buff;
460 }
461
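/* Derive the generic net_device_stats from the MAC hardware counters:
 * rx_errors aggregates oversize, undersize and FCS errors, while the
 * multicast counter here sums both TX and RX multicast packets.
 */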
462 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
463                                  struct net_device_stats *net_stats)
464 {
465         net_stats->tx_dropped = 0;
466         net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
467         net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
468         net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
469
470         net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
471         net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
472
473         net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
474         net_stats->rx_length_errors =
475                 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
476         net_stats->rx_length_errors +=
477                 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
478         net_stats->rx_over_errors =
479                 hw_stats->mac_stats.mac_rx_oversize_pkt_num;
480 }
481
482 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
483 {
484         struct hnae3_handle *handle;
485         int status;
486
487         handle = &hdev->vport[0].nic;
488         if (handle->client) {
489                 status = hclge_tqps_update_stats(handle);
490                 if (status) {
491                         dev_err(&hdev->pdev->dev,
492                                 "Update TQPS stats fail, status = %d.\n",
493                                 status);
494                 }
495         }
496
497         status = hclge_mac_update_stats(hdev);
498         if (status)
499                 dev_err(&hdev->pdev->dev,
500                         "Update MAC stats fail, status = %d.\n", status);
501
502         hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
503 }
504
505 static void hclge_update_stats(struct hnae3_handle *handle,
506                                struct net_device_stats *net_stats)
507 {
508         struct hclge_vport *vport = hclge_get_vport(handle);
509         struct hclge_dev *hdev = vport->back;
510         struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
511         int status;
512
513         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
514                 return;
515
516         status = hclge_mac_update_stats(hdev);
517         if (status)
518                 dev_err(&hdev->pdev->dev,
519                         "Update MAC stats fail, status = %d.\n",
520                         status);
521
522         status = hclge_tqps_update_stats(handle);
523         if (status)
524                 dev_err(&hdev->pdev->dev,
525                         "Update TQPS stats fail, status = %d.\n",
526                         status);
527
528         hclge_update_netstat(hw_stats, net_stats);
529
530         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
531 }
532
533 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
534 {
535 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
536                 HNAE3_SUPPORT_PHY_LOOPBACK |\
537                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
538                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
539
540         struct hclge_vport *vport = hclge_get_vport(handle);
541         struct hclge_dev *hdev = vport->back;
542         int count = 0;
543
544         /* Loopback test support rules:
545          * mac: only supported in GE mode
546          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
547          * phy: only supported when a phy device exists on the board
548          */
549         if (stringset == ETH_SS_TEST) {
550                 /* clear loopback bit flags at first */
551                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
552                 if (hdev->pdev->revision >= 0x21 ||
553                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
554                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
555                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
556                         count += 1;
557                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
558                 }
559
560                 count += 2;
561                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
562                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
563         } else if (stringset == ETH_SS_STATS) {
564                 count = ARRAY_SIZE(g_mac_stats_string) +
565                         hclge_tqps_get_sset_count(handle, stringset);
566         }
567
568         return count;
569 }
570
571 static void hclge_get_strings(struct hnae3_handle *handle,
572                               u32 stringset,
573                               u8 *data)
574 {
575         u8 *p = (char *)data;
576         int size;
577
578         if (stringset == ETH_SS_STATS) {
579                 size = ARRAY_SIZE(g_mac_stats_string);
580                 p = hclge_comm_get_strings(stringset,
581                                            g_mac_stats_string,
582                                            size,
583                                            p);
584                 p = hclge_tqps_get_strings(handle, p);
585         } else if (stringset == ETH_SS_TEST) {
586                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
587                         memcpy(p,
588                                hns3_nic_test_strs[HNAE3_LOOP_APP],
589                                ETH_GSTRING_LEN);
590                         p += ETH_GSTRING_LEN;
591                 }
592                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
593                         memcpy(p,
594                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
595                                ETH_GSTRING_LEN);
596                         p += ETH_GSTRING_LEN;
597                 }
598                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
599                         memcpy(p,
600                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
601                                ETH_GSTRING_LEN);
602                         p += ETH_GSTRING_LEN;
603                 }
604                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
605                         memcpy(p,
606                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
607                                ETH_GSTRING_LEN);
608                         p += ETH_GSTRING_LEN;
609                 }
610         }
611 }
612
613 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
614 {
615         struct hclge_vport *vport = hclge_get_vport(handle);
616         struct hclge_dev *hdev = vport->back;
617         u64 *p;
618
619         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
620                                  g_mac_stats_string,
621                                  ARRAY_SIZE(g_mac_stats_string),
622                                  data);
623         p = hclge_tqps_get_stats(handle, p);
624 }
625
626 static int hclge_parse_func_status(struct hclge_dev *hdev,
627                                    struct hclge_func_status_cmd *status)
628 {
629         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
630                 return -EINVAL;
631
632         /* Set the pf to main pf */
633         if (status->pf_state & HCLGE_PF_STATE_MAIN)
634                 hdev->flag |= HCLGE_FLAG_MAIN;
635         else
636                 hdev->flag &= ~HCLGE_FLAG_MAIN;
637
638         return 0;
639 }
640
641 static int hclge_query_function_status(struct hclge_dev *hdev)
642 {
643         struct hclge_func_status_cmd *req;
644         struct hclge_desc desc;
645         int timeout = 0;
646         int ret;
647
648         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
649         req = (struct hclge_func_status_cmd *)desc.data;
650
651         do {
652                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
653                 if (ret) {
654                         dev_err(&hdev->pdev->dev,
655                                 "query function status failed %d.\n",
656                                 ret);
657
658                         return ret;
659                 }
660
661                 /* Check pf reset is done */
662                 if (req->pf_state)
663                         break;
664                 usleep_range(1000, 2000);
665         } while (timeout++ < 5);
666
667         ret = hclge_parse_func_status(hdev, req);
668
669         return ret;
670 }
671
672 static int hclge_query_pf_resource(struct hclge_dev *hdev)
673 {
674         struct hclge_pf_res_cmd *req;
675         struct hclge_desc desc;
676         int ret;
677
678         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
679         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
680         if (ret) {
681                 dev_err(&hdev->pdev->dev,
682                         "query pf resource failed %d.\n", ret);
683                 return ret;
684         }
685
686         req = (struct hclge_pf_res_cmd *)desc.data;
687         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
688         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
689
690         if (hnae3_dev_roce_supported(hdev)) {
691                 hdev->roce_base_msix_offset =
692                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
693                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
694                 hdev->num_roce_msi =
695                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
696                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
697
698                 /* PF should have NIC vectors and Roce vectors,
699                  * NIC vectors are queued before Roce vectors.
700                  */
701                 hdev->num_msi = hdev->num_roce_msi  +
702                                 hdev->roce_base_msix_offset;
703         } else {
704                 hdev->num_msi =
705                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
706                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
707         }
708
709         return 0;
710 }
711
712 static int hclge_parse_speed(int speed_cmd, int *speed)
713 {
714         switch (speed_cmd) {
715         case 6:
716                 *speed = HCLGE_MAC_SPEED_10M;
717                 break;
718         case 7:
719                 *speed = HCLGE_MAC_SPEED_100M;
720                 break;
721         case 0:
722                 *speed = HCLGE_MAC_SPEED_1G;
723                 break;
724         case 1:
725                 *speed = HCLGE_MAC_SPEED_10G;
726                 break;
727         case 2:
728                 *speed = HCLGE_MAC_SPEED_25G;
729                 break;
730         case 3:
731                 *speed = HCLGE_MAC_SPEED_40G;
732                 break;
733         case 4:
734                 *speed = HCLGE_MAC_SPEED_50G;
735                 break;
736         case 5:
737                 *speed = HCLGE_MAC_SPEED_100G;
738                 break;
739         default:
740                 return -EINVAL;
741         }
742
743         return 0;
744 }
745
746 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
747                                         u8 speed_ability)
748 {
749         unsigned long *supported = hdev->hw.mac.supported;
750
751         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
752                 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
753                         supported);
754
755         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
756                 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
757                         supported);
758
759         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
760                 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
761                         supported);
762
763         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
764                 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
765                         supported);
766
767         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
768                 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
769                         supported);
770
771         set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
772         set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
773 }
774
775 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
776 {
777         u8 media_type = hdev->hw.mac.media_type;
778
779         if (media_type != HNAE3_MEDIA_TYPE_FIBER)
780                 return;
781
782         hclge_parse_fiber_link_mode(hdev, speed_ability);
783 }
784
785 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
786 {
787         struct hclge_cfg_param_cmd *req;
788         u64 mac_addr_tmp_high;
789         u64 mac_addr_tmp;
790         int i;
791
792         req = (struct hclge_cfg_param_cmd *)desc[0].data;
793
794         /* get the configuration */
795         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
796                                               HCLGE_CFG_VMDQ_M,
797                                               HCLGE_CFG_VMDQ_S);
798         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
799                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
800         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
801                                             HCLGE_CFG_TQP_DESC_N_M,
802                                             HCLGE_CFG_TQP_DESC_N_S);
803
804         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
805                                         HCLGE_CFG_PHY_ADDR_M,
806                                         HCLGE_CFG_PHY_ADDR_S);
807         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
808                                           HCLGE_CFG_MEDIA_TP_M,
809                                           HCLGE_CFG_MEDIA_TP_S);
810         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
811                                           HCLGE_CFG_RX_BUF_LEN_M,
812                                           HCLGE_CFG_RX_BUF_LEN_S);
813         /* get mac_address */
814         mac_addr_tmp = __le32_to_cpu(req->param[2]);
815         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
816                                             HCLGE_CFG_MAC_ADDR_H_M,
817                                             HCLGE_CFG_MAC_ADDR_H_S);
818
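        /* The 48-bit MAC address is split across two config words: the low
         * 32 bits come from param[2] and the upper 16 bits from param[3];
         * combine them into a single 64-bit value before byte extraction.
         */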
819         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
820
821         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
822                                              HCLGE_CFG_DEFAULT_SPEED_M,
823                                              HCLGE_CFG_DEFAULT_SPEED_S);
824         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
825                                             HCLGE_CFG_RSS_SIZE_M,
826                                             HCLGE_CFG_RSS_SIZE_S);
827
828         for (i = 0; i < ETH_ALEN; i++)
829                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
830
831         req = (struct hclge_cfg_param_cmd *)desc[1].data;
832         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
833
834         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
835                                              HCLGE_CFG_SPEED_ABILITY_M,
836                                              HCLGE_CFG_SPEED_ABILITY_S);
837         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
838                                          HCLGE_CFG_UMV_TBL_SPACE_M,
839                                          HCLGE_CFG_UMV_TBL_SPACE_S);
840         if (!cfg->umv_space)
841                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
842 }
843
844 /* hclge_get_cfg: query the static parameters from flash
845  * @hdev: pointer to struct hclge_dev
846  * @hcfg: the config structure to be filled
847  */
848 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
849 {
850         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
851         struct hclge_cfg_param_cmd *req;
852         int i, ret;
853
854         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
855                 u32 offset = 0;
856
857                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
858                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
859                                            true);
860                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
861                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
862                 /* Len is in units of 4 bytes when sent to hardware */
863                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
864                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
865                 req->offset = cpu_to_le32(offset);
866         }
867
868         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
869         if (ret) {
870                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
871                 return ret;
872         }
873
874         hclge_parse_cfg(hcfg, desc);
875
876         return 0;
877 }
878
879 static int hclge_get_cap(struct hclge_dev *hdev)
880 {
881         int ret;
882
883         ret = hclge_query_function_status(hdev);
884         if (ret) {
885                 dev_err(&hdev->pdev->dev,
886                         "query function status error %d.\n", ret);
887                 return ret;
888         }
889
890         /* get pf resource */
891         ret = hclge_query_pf_resource(hdev);
892         if (ret)
893                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
894
895         return ret;
896 }
897
898 static int hclge_configure(struct hclge_dev *hdev)
899 {
900         struct hclge_cfg cfg;
901         int ret, i;
902
903         ret = hclge_get_cfg(hdev, &cfg);
904         if (ret) {
905                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
906                 return ret;
907         }
908
909         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
910         hdev->base_tqp_pid = 0;
911         hdev->rss_size_max = cfg.rss_size_max;
912         hdev->rx_buf_len = cfg.rx_buf_len;
913         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
914         hdev->hw.mac.media_type = cfg.media_type;
915         hdev->hw.mac.phy_addr = cfg.phy_addr;
916         hdev->num_desc = cfg.tqp_desc_num;
917         hdev->tm_info.num_pg = 1;
918         hdev->tc_max = cfg.tc_num;
919         hdev->tm_info.hw_pfc_map = 0;
920         hdev->wanted_umv_size = cfg.umv_space;
921
922         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
923         if (ret) {
924                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
925                 return ret;
926         }
927
928         hclge_parse_link_mode(hdev, cfg.speed_ability);
929
930         if ((hdev->tc_max > HNAE3_MAX_TC) ||
931             (hdev->tc_max < 1)) {
932                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
933                          hdev->tc_max);
934                 hdev->tc_max = 1;
935         }
936
937         /* Dev does not support DCB */
938         if (!hnae3_dev_dcb_supported(hdev)) {
939                 hdev->tc_max = 1;
940                 hdev->pfc_max = 0;
941         } else {
942                 hdev->pfc_max = hdev->tc_max;
943         }
944
945         hdev->tm_info.num_tc = hdev->tc_max;
946
947         /* Non-contiguous TCs are currently not supported */
948         for (i = 0; i < hdev->tm_info.num_tc; i++)
949                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
950
951         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
952
953         return ret;
954 }
955
956 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
957                             int tso_mss_max)
958 {
959         struct hclge_cfg_tso_status_cmd *req;
960         struct hclge_desc desc;
961         u16 tso_mss;
962
963         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
964
965         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
966
967         tso_mss = 0;
968         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
969                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
970         req->tso_mss_min = cpu_to_le16(tso_mss);
971
972         tso_mss = 0;
973         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
974                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
975         req->tso_mss_max = cpu_to_le16(tso_mss);
976
977         return hclge_cmd_send(&hdev->hw, &desc, 1);
978 }
979
980 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
981 {
982         struct hclge_cfg_gro_status_cmd *req;
983         struct hclge_desc desc;
984         int ret;
985
986         if (!hnae3_dev_gro_supported(hdev))
987                 return 0;
988
989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
990         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
991
992         req->gro_en = cpu_to_le16(en ? 1 : 0);
993
994         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
995         if (ret)
996                 dev_err(&hdev->pdev->dev,
997                         "GRO hardware config cmd failed, ret = %d\n", ret);
998
999         return ret;
1000 }
1001
1002 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1003 {
1004         struct hclge_tqp *tqp;
1005         int i;
1006
1007         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1008                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1009         if (!hdev->htqp)
1010                 return -ENOMEM;
1011
1012         tqp = hdev->htqp;
1013
1014         for (i = 0; i < hdev->num_tqps; i++) {
1015                 tqp->dev = &hdev->pdev->dev;
1016                 tqp->index = i;
1017
1018                 tqp->q.ae_algo = &ae_algo;
1019                 tqp->q.buf_size = hdev->rx_buf_len;
1020                 tqp->q.desc_num = hdev->num_desc;
1021                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1022                         i * HCLGE_TQP_REG_SIZE;
1023
1024                 tqp++;
1025         }
1026
1027         return 0;
1028 }
1029
1030 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1031                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1032 {
1033         struct hclge_tqp_map_cmd *req;
1034         struct hclge_desc desc;
1035         int ret;
1036
1037         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1038
1039         req = (struct hclge_tqp_map_cmd *)desc.data;
1040         req->tqp_id = cpu_to_le16(tqp_pid);
1041         req->tqp_vf = func_id;
1042         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1043                         1 << HCLGE_TQP_MAP_EN_B;
1044         req->tqp_vid = cpu_to_le16(tqp_vid);
1045
1046         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1047         if (ret)
1048                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1049
1050         return ret;
1051 }
1052
1053 static int  hclge_assign_tqp(struct hclge_vport *vport)
1054 {
1055         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1056         struct hclge_dev *hdev = vport->back;
1057         int i, alloced;
1058
1059         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1060              alloced < kinfo->num_tqps; i++) {
1061                 if (!hdev->htqp[i].alloced) {
1062                         hdev->htqp[i].q.handle = &vport->nic;
1063                         hdev->htqp[i].q.tqp_index = alloced;
1064                         hdev->htqp[i].q.desc_num = kinfo->num_desc;
1065                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1066                         hdev->htqp[i].alloced = true;
1067                         alloced++;
1068                 }
1069         }
1070         vport->alloc_tqps = kinfo->num_tqps;
1071
1072         return 0;
1073 }
1074
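/* Set up the KNIC part of a vport: rss_size is capped by both the hardware
 * maximum and the queues available per TC, and the usable TQP count is then
 * rss_size * num_tc so that every enabled TC gets an equal slice of queues.
 */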
1075 static int hclge_knic_setup(struct hclge_vport *vport,
1076                             u16 num_tqps, u16 num_desc)
1077 {
1078         struct hnae3_handle *nic = &vport->nic;
1079         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1080         struct hclge_dev *hdev = vport->back;
1081         int i, ret;
1082
1083         kinfo->num_desc = num_desc;
1084         kinfo->rx_buf_len = hdev->rx_buf_len;
1085         kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1086         kinfo->rss_size
1087                 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1088         kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1089
1090         for (i = 0; i < HNAE3_MAX_TC; i++) {
1091                 if (hdev->hw_tc_map & BIT(i)) {
1092                         kinfo->tc_info[i].enable = true;
1093                         kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1094                         kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1095                         kinfo->tc_info[i].tc = i;
1096                 } else {
1097                         /* Set to default queue if TC is disabled */
1098                         kinfo->tc_info[i].enable = false;
1099                         kinfo->tc_info[i].tqp_offset = 0;
1100                         kinfo->tc_info[i].tqp_count = 1;
1101                         kinfo->tc_info[i].tc = 0;
1102                 }
1103         }
1104
1105         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1106                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1107         if (!kinfo->tqp)
1108                 return -ENOMEM;
1109
1110         ret = hclge_assign_tqp(vport);
1111         if (ret)
1112                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1113
1114         return ret;
1115 }
1116
1117 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1118                                   struct hclge_vport *vport)
1119 {
1120         struct hnae3_handle *nic = &vport->nic;
1121         struct hnae3_knic_private_info *kinfo;
1122         u16 i;
1123
1124         kinfo = &nic->kinfo;
1125         for (i = 0; i < kinfo->num_tqps; i++) {
1126                 struct hclge_tqp *q =
1127                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1128                 bool is_pf;
1129                 int ret;
1130
1131                 is_pf = !(vport->vport_id);
1132                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1133                                              i, is_pf);
1134                 if (ret)
1135                         return ret;
1136         }
1137
1138         return 0;
1139 }
1140
1141 static int hclge_map_tqp(struct hclge_dev *hdev)
1142 {
1143         struct hclge_vport *vport = hdev->vport;
1144         u16 i, num_vport;
1145
1146         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1147         for (i = 0; i < num_vport; i++) {
1148                 int ret;
1149
1150                 ret = hclge_map_tqp_to_vport(hdev, vport);
1151                 if (ret)
1152                         return ret;
1153
1154                 vport++;
1155         }
1156
1157         return 0;
1158 }
1159
1160 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1161 {
1162         /* this would be initialized later */
1163 }
1164
1165 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1166 {
1167         struct hnae3_handle *nic = &vport->nic;
1168         struct hclge_dev *hdev = vport->back;
1169         int ret;
1170
1171         nic->pdev = hdev->pdev;
1172         nic->ae_algo = &ae_algo;
1173         nic->numa_node_mask = hdev->numa_node_mask;
1174
1175         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1176                 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1177                 if (ret) {
1178                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1179                                 ret);
1180                         return ret;
1181                 }
1182         } else {
1183                 hclge_unic_setup(vport, num_tqps);
1184         }
1185
1186         return 0;
1187 }
1188
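/* Allocate one vport for the PF's main NIC plus one per VMDq vport and per
 * requested VF. TQPs are split evenly across the vports; any remainder goes
 * to the main (vport 0) NIC.
 */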
1189 static int hclge_alloc_vport(struct hclge_dev *hdev)
1190 {
1191         struct pci_dev *pdev = hdev->pdev;
1192         struct hclge_vport *vport;
1193         u32 tqp_main_vport;
1194         u32 tqp_per_vport;
1195         int num_vport, i;
1196         int ret;
1197
1198         /* We need to alloc a vport for main NIC of PF */
1199         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1200
1201         if (hdev->num_tqps < num_vport) {
1202                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1203                         hdev->num_tqps, num_vport);
1204                 return -EINVAL;
1205         }
1206
1207         /* Alloc the same number of TQPs for every vport */
1208         tqp_per_vport = hdev->num_tqps / num_vport;
1209         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1210
1211         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1212                              GFP_KERNEL);
1213         if (!vport)
1214                 return -ENOMEM;
1215
1216         hdev->vport = vport;
1217         hdev->num_alloc_vport = num_vport;
1218
1219         if (IS_ENABLED(CONFIG_PCI_IOV))
1220                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1221
1222         for (i = 0; i < num_vport; i++) {
1223                 vport->back = hdev;
1224                 vport->vport_id = i;
1225                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1226
1227                 if (i == 0)
1228                         ret = hclge_vport_setup(vport, tqp_main_vport);
1229                 else
1230                         ret = hclge_vport_setup(vport, tqp_per_vport);
1231                 if (ret) {
1232                         dev_err(&pdev->dev,
1233                                 "vport setup failed for vport %d, %d\n",
1234                                 i, ret);
1235                         return ret;
1236                 }
1237
1238                 vport++;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1245                                     struct hclge_pkt_buf_alloc *buf_alloc)
1246 {
1247 /* TX buffer size is in units of 128 bytes */
1248 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1249 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1250         struct hclge_tx_buff_alloc_cmd *req;
1251         struct hclge_desc desc;
1252         int ret;
1253         u8 i;
1254
1255         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1256
1257         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1258         for (i = 0; i < HCLGE_TC_NUM; i++) {
1259                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1260
1261                 req->tx_pkt_buff[i] =
1262                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1263                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1264         }
1265
1266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1267         if (ret)
1268                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1269                         ret);
1270
1271         return ret;
1272 }
1273
1274 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1275                                  struct hclge_pkt_buf_alloc *buf_alloc)
1276 {
1277         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1278
1279         if (ret)
1280                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1281
1282         return ret;
1283 }
1284
1285 static int hclge_get_tc_num(struct hclge_dev *hdev)
1286 {
1287         int i, cnt = 0;
1288
1289         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1290                 if (hdev->hw_tc_map & BIT(i))
1291                         cnt++;
1292         return cnt;
1293 }
1294
1295 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1296 {
1297         int i, cnt = 0;
1298
1299         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1300                 if (hdev->hw_tc_map & BIT(i) &&
1301                     hdev->tm_info.hw_pfc_map & BIT(i))
1302                         cnt++;
1303         return cnt;
1304 }
1305
1306 /* Get the number of pfc enabled TCs, which have private buffer */
1307 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1308                                   struct hclge_pkt_buf_alloc *buf_alloc)
1309 {
1310         struct hclge_priv_buf *priv;
1311         int i, cnt = 0;
1312
1313         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1314                 priv = &buf_alloc->priv_buf[i];
1315                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1316                     priv->enable)
1317                         cnt++;
1318         }
1319
1320         return cnt;
1321 }
1322
1323 /* Get the number of pfc disabled TCs, which have private buffer */
1324 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1325                                      struct hclge_pkt_buf_alloc *buf_alloc)
1326 {
1327         struct hclge_priv_buf *priv;
1328         int i, cnt = 0;
1329
1330         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1331                 priv = &buf_alloc->priv_buf[i];
1332                 if (hdev->hw_tc_map & BIT(i) &&
1333                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1334                     priv->enable)
1335                         cnt++;
1336         }
1337
1338         return cnt;
1339 }
1340
1341 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1342 {
1343         struct hclge_priv_buf *priv;
1344         u32 rx_priv = 0;
1345         int i;
1346
1347         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1348                 priv = &buf_alloc->priv_buf[i];
1349                 if (priv->enable)
1350                         rx_priv += priv->buf_size;
1351         }
1352         return rx_priv;
1353 }
1354
1355 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1356 {
1357         u32 i, total_tx_size = 0;
1358
1359         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1360                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1361
1362         return total_tx_size;
1363 }
1364
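/* Check whether the remaining packet buffer can hold both the per-TC
 * private RX buffers and a shared buffer. The shared buffer must cover at
 * least 2 * MPS plus a DV headroom (or the non-DCB equivalent), and its
 * per-TC flow-control thresholds are set higher for PFC-enabled TCs.
 */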
1365 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1366                                 struct hclge_pkt_buf_alloc *buf_alloc,
1367                                 u32 rx_all)
1368 {
1369         u32 shared_buf_min, shared_buf_tc, shared_std;
1370         int tc_num, pfc_enable_num;
1371         u32 shared_buf;
1372         u32 rx_priv;
1373         int i;
1374
1375         tc_num = hclge_get_tc_num(hdev);
1376         pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1377
1378         if (hnae3_dev_dcb_supported(hdev))
1379                 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1380         else
1381                 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1382
1383         shared_buf_tc = pfc_enable_num * hdev->mps +
1384                         (tc_num - pfc_enable_num) * hdev->mps / 2 +
1385                         hdev->mps;
1386         shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1387
1388         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1389         if (rx_all <= rx_priv + shared_std)
1390                 return false;
1391
1392         shared_buf = rx_all - rx_priv;
1393         buf_alloc->s_buf.buf_size = shared_buf;
1394         buf_alloc->s_buf.self.high = shared_buf;
1395         buf_alloc->s_buf.self.low =  2 * hdev->mps;
1396
1397         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1398                 if ((hdev->hw_tc_map & BIT(i)) &&
1399                     (hdev->tm_info.hw_pfc_map & BIT(i))) {
1400                         buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1401                         buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1402                 } else {
1403                         buf_alloc->s_buf.tc_thrd[i].low = 0;
1404                         buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1405                 }
1406         }
1407
1408         return true;
1409 }
1410
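/* Assign the default tx buffer size to every enabled TC out of the total
 * packet buffer, return -ENOMEM if the packet buffer is not large enough.
 */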
1411 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1412                                 struct hclge_pkt_buf_alloc *buf_alloc)
1413 {
1414         u32 i, total_size;
1415
1416         total_size = hdev->pkt_buf_size;
1417
1418         /* alloc tx buffer for all enabled tc */
1419         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1420                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1421
1422                 if (total_size < HCLGE_DEFAULT_TX_BUF)
1423                         return -ENOMEM;
1424
1425                 if (hdev->hw_tc_map & BIT(i))
1426                         priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1427                 else
1428                         priv->tx_buf_size = 0;
1429
1430                 total_size -= priv->tx_buf_size;
1431         }
1432
1433         return 0;
1434 }
1435
1436 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1437  * @hdev: pointer to struct hclge_dev
1438  * @buf_alloc: pointer to buffer calculation data
1439  * @return: 0: calculation successful, negative: fail
1440  */
1441 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1442                                 struct hclge_pkt_buf_alloc *buf_alloc)
1443 {
1444 #define HCLGE_BUF_SIZE_UNIT     128
1445         u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1446         int no_pfc_priv_num, pfc_priv_num;
1447         struct hclge_priv_buf *priv;
1448         int i;
1449
1450         aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1451         rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1452
1453         /* When DCB is not supported, rx private
1454          * buffer is not allocated.
1455          */
1456         if (!hnae3_dev_dcb_supported(hdev)) {
1457                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1458                         return -ENOMEM;
1459
1460                 return 0;
1461         }
1462
1463         /* step 1, try to alloc private buffer for all enabled tc */
1464         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1465                 priv = &buf_alloc->priv_buf[i];
1466                 if (hdev->hw_tc_map & BIT(i)) {
1467                         priv->enable = 1;
1468                         if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1469                                 priv->wl.low = aligned_mps;
1470                                 priv->wl.high = priv->wl.low + aligned_mps;
1471                                 priv->buf_size = priv->wl.high +
1472                                                 HCLGE_DEFAULT_DV;
1473                         } else {
1474                                 priv->wl.low = 0;
1475                                 priv->wl.high = 2 * aligned_mps;
1476                                 priv->buf_size = priv->wl.high;
1477                         }
1478                 } else {
1479                         priv->enable = 0;
1480                         priv->wl.low = 0;
1481                         priv->wl.high = 0;
1482                         priv->buf_size = 0;
1483                 }
1484         }
1485
1486         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1487                 return 0;
1488
1489         /* step 2, try to decrease the buffer size of
1490          * no pfc TC's private buffer
1491          */
1492         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1493                 priv = &buf_alloc->priv_buf[i];
1494
1495                 priv->enable = 0;
1496                 priv->wl.low = 0;
1497                 priv->wl.high = 0;
1498                 priv->buf_size = 0;
1499
1500                 if (!(hdev->hw_tc_map & BIT(i)))
1501                         continue;
1502
1503                 priv->enable = 1;
1504
1505                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1506                         priv->wl.low = 128;
1507                         priv->wl.high = priv->wl.low + aligned_mps;
1508                         priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1509                 } else {
1510                         priv->wl.low = 0;
1511                         priv->wl.high = aligned_mps;
1512                         priv->buf_size = priv->wl.high;
1513                 }
1514         }
1515
1516         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1517                 return 0;
1518
1519         /* step 3, try to reduce the number of pfc disabled TCs,
1520          * which have private buffer
1521          */
1522         /* get the number of pfc disabled TCs, which have private buffer */
1523         no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1524
1525         /* let the last one be cleared first */
1526         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1527                 priv = &buf_alloc->priv_buf[i];
1528
1529                 if (hdev->hw_tc_map & BIT(i) &&
1530                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1531                         /* Clear the no pfc TC private buffer */
1532                         priv->wl.low = 0;
1533                         priv->wl.high = 0;
1534                         priv->buf_size = 0;
1535                         priv->enable = 0;
1536                         no_pfc_priv_num--;
1537                 }
1538
1539                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1540                     no_pfc_priv_num == 0)
1541                         break;
1542         }
1543
1544         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1545                 return 0;
1546
1547         /* step 4, try to reduce the number of pfc enabled TCs
1548          * which have private buffer.
1549          */
1550         pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1551
1552         /* let the last one be cleared first */
1553         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1554                 priv = &buf_alloc->priv_buf[i];
1555
1556                 if (hdev->hw_tc_map & BIT(i) &&
1557                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1558                         /* Reduce the number of pfc TC with private buffer */
1559                         priv->wl.low = 0;
1560                         priv->enable = 0;
1561                         priv->wl.high = 0;
1562                         priv->buf_size = 0;
1563                         pfc_priv_num--;
1564                 }
1565
1566                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1567                     pfc_priv_num == 0)
1568                         break;
1569         }
1570         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1571                 return 0;
1572
1573         return -ENOMEM;
1574 }
1575
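/* Program the calculated rx private buffer size of each TC and the shared
 * buffer size into the hardware.
 */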
1576 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1577                                    struct hclge_pkt_buf_alloc *buf_alloc)
1578 {
1579         struct hclge_rx_priv_buff_cmd *req;
1580         struct hclge_desc desc;
1581         int ret;
1582         int i;
1583
1584         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1585         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1586
1587         /* Alloc private buffer TCs */
1588         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1589                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1590
1591                 req->buf_num[i] =
1592                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1593                 req->buf_num[i] |=
1594                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1595         }
1596
1597         req->shared_buf =
1598                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1599                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1600
1601         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1602         if (ret)
1603                 dev_err(&hdev->pdev->dev,
1604                         "rx private buffer alloc cmd failed %d\n", ret);
1605
1606         return ret;
1607 }
1608
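/* Configure the high/low waterline of the rx private buffer for each TC,
 * two descriptors are used to cover all TCs.
 */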
1609 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1610                                    struct hclge_pkt_buf_alloc *buf_alloc)
1611 {
1612         struct hclge_rx_priv_wl_buf *req;
1613         struct hclge_priv_buf *priv;
1614         struct hclge_desc desc[2];
1615         int i, j;
1616         int ret;
1617
1618         for (i = 0; i < 2; i++) {
1619                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1620                                            false);
1621                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1622
1623                 /* The first descriptor sets the NEXT bit to 1 */
1624                 if (i == 0)
1625                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1626                 else
1627                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1628
1629                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1630                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1631
1632                         priv = &buf_alloc->priv_buf[idx];
1633                         req->tc_wl[j].high =
1634                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1635                         req->tc_wl[j].high |=
1636                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1637                         req->tc_wl[j].low =
1638                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1639                         req->tc_wl[j].low |=
1640                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1641                 }
1642         }
1643
1644         /* Send 2 descriptors at one time */
1645         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1646         if (ret)
1647                 dev_err(&hdev->pdev->dev,
1648                         "rx private waterline config cmd failed %d\n",
1649                         ret);
1650         return ret;
1651 }
1652
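/* Configure the per TC high/low thresholds of the shared (common) rx buffer */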
1653 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1654                                     struct hclge_pkt_buf_alloc *buf_alloc)
1655 {
1656         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1657         struct hclge_rx_com_thrd *req;
1658         struct hclge_desc desc[2];
1659         struct hclge_tc_thrd *tc;
1660         int i, j;
1661         int ret;
1662
1663         for (i = 0; i < 2; i++) {
1664                 hclge_cmd_setup_basic_desc(&desc[i],
1665                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1666                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1667
1668                 /* The first descriptor sets the NEXT bit to 1 */
1669                 if (i == 0)
1670                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1671                 else
1672                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1673
1674                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1675                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1676
1677                         req->com_thrd[j].high =
1678                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1679                         req->com_thrd[j].high |=
1680                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1681                         req->com_thrd[j].low =
1682                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1683                         req->com_thrd[j].low |=
1684                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1685                 }
1686         }
1687
1688         /* Send 2 descriptors at one time */
1689         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1690         if (ret)
1691                 dev_err(&hdev->pdev->dev,
1692                         "common threshold config cmd failed %d\n", ret);
1693         return ret;
1694 }
1695
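/* Configure the high/low waterline of the shared (common) rx buffer */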
1696 static int hclge_common_wl_config(struct hclge_dev *hdev,
1697                                   struct hclge_pkt_buf_alloc *buf_alloc)
1698 {
1699         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1700         struct hclge_rx_com_wl *req;
1701         struct hclge_desc desc;
1702         int ret;
1703
1704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1705
1706         req = (struct hclge_rx_com_wl *)desc.data;
1707         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1708         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1709
1710         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1711         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1712
1713         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1714         if (ret)
1715                 dev_err(&hdev->pdev->dev,
1716                         "common waterline config cmd failed %d\n", ret);
1717
1718         return ret;
1719 }
1720
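/* hclge_buffer_alloc: calculate and configure the tx/rx packet buffer for all TCs
 * @hdev: pointer to struct hclge_dev
 * @return: 0: successful, negative: fail
 */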
1721 int hclge_buffer_alloc(struct hclge_dev *hdev)
1722 {
1723         struct hclge_pkt_buf_alloc *pkt_buf;
1724         int ret;
1725
1726         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1727         if (!pkt_buf)
1728                 return -ENOMEM;
1729
1730         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1731         if (ret) {
1732                 dev_err(&hdev->pdev->dev,
1733                         "could not calc tx buffer size for all TCs %d\n", ret);
1734                 goto out;
1735         }
1736
1737         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1738         if (ret) {
1739                 dev_err(&hdev->pdev->dev,
1740                         "could not alloc tx buffers %d\n", ret);
1741                 goto out;
1742         }
1743
1744         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1745         if (ret) {
1746                 dev_err(&hdev->pdev->dev,
1747                         "could not calc rx priv buffer size for all TCs %d\n",
1748                         ret);
1749                 goto out;
1750         }
1751
1752         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1753         if (ret) {
1754                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1755                         ret);
1756                 goto out;
1757         }
1758
1759         if (hnae3_dev_dcb_supported(hdev)) {
1760                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1761                 if (ret) {
1762                         dev_err(&hdev->pdev->dev,
1763                                 "could not configure rx private waterline %d\n",
1764                                 ret);
1765                         goto out;
1766                 }
1767
1768                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1769                 if (ret) {
1770                         dev_err(&hdev->pdev->dev,
1771                                 "could not configure common threshold %d\n",
1772                                 ret);
1773                         goto out;
1774                 }
1775         }
1776
1777         ret = hclge_common_wl_config(hdev, pkt_buf);
1778         if (ret)
1779                 dev_err(&hdev->pdev->dev,
1780                         "could not configure common waterline %d\n", ret);
1781
1782 out:
1783         kfree(pkt_buf);
1784         return ret;
1785 }
1786
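/* Fill the roce handle's vector, netdev and io_base info from the nic handle
 * and the PF's roce resources.
 */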
1787 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1788 {
1789         struct hnae3_handle *roce = &vport->roce;
1790         struct hnae3_handle *nic = &vport->nic;
1791
1792         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1793
1794         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1795             vport->back->num_msi_left == 0)
1796                 return -EINVAL;
1797
1798         roce->rinfo.base_vector = vport->back->roce_base_vector;
1799
1800         roce->rinfo.netdev = nic->kinfo.netdev;
1801         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1802
1803         roce->pdev = nic->pdev;
1804         roce->ae_algo = nic->ae_algo;
1805         roce->numa_node_mask = nic->numa_node_mask;
1806
1807         return 0;
1808 }
1809
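/* Allocate MSI/MSI-X vectors for the PF and set up the vector bookkeeping */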
1810 static int hclge_init_msi(struct hclge_dev *hdev)
1811 {
1812         struct pci_dev *pdev = hdev->pdev;
1813         int vectors;
1814         int i;
1815
1816         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1817                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1818         if (vectors < 0) {
1819                 dev_err(&pdev->dev,
1820                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1821                         vectors);
1822                 return vectors;
1823         }
1824         if (vectors < hdev->num_msi)
1825                 dev_warn(&hdev->pdev->dev,
1826                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1827                          hdev->num_msi, vectors);
1828
1829         hdev->num_msi = vectors;
1830         hdev->num_msi_left = vectors;
1831         hdev->base_msi_vector = pdev->irq;
1832         hdev->roce_base_vector = hdev->base_msi_vector +
1833                                 hdev->roce_base_msix_offset;
1834
1835         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1836                                            sizeof(u16), GFP_KERNEL);
1837         if (!hdev->vector_status) {
1838                 pci_free_irq_vectors(pdev);
1839                 return -ENOMEM;
1840         }
1841
1842         for (i = 0; i < hdev->num_msi; i++)
1843                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1844
1845         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1846                                         sizeof(int), GFP_KERNEL);
1847         if (!hdev->vector_irq) {
1848                 pci_free_irq_vectors(pdev);
1849                 return -ENOMEM;
1850         }
1851
1852         return 0;
1853 }
1854
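/* Duplex is configurable only for 10M/100M, force full duplex otherwise */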
1855 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1856 {
1857
1858         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1859                 duplex = HCLGE_MAC_FULL;
1860
1861         return duplex;
1862 }
1863
1864 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1865                                       u8 duplex)
1866 {
1867         struct hclge_config_mac_speed_dup_cmd *req;
1868         struct hclge_desc desc;
1869         int ret;
1870
1871         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1872
1873         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1874
1875         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1876
1877         switch (speed) {
1878         case HCLGE_MAC_SPEED_10M:
1879                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1880                                 HCLGE_CFG_SPEED_S, 6);
1881                 break;
1882         case HCLGE_MAC_SPEED_100M:
1883                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1884                                 HCLGE_CFG_SPEED_S, 7);
1885                 break;
1886         case HCLGE_MAC_SPEED_1G:
1887                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1888                                 HCLGE_CFG_SPEED_S, 0);
1889                 break;
1890         case HCLGE_MAC_SPEED_10G:
1891                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1892                                 HCLGE_CFG_SPEED_S, 1);
1893                 break;
1894         case HCLGE_MAC_SPEED_25G:
1895                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1896                                 HCLGE_CFG_SPEED_S, 2);
1897                 break;
1898         case HCLGE_MAC_SPEED_40G:
1899                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1900                                 HCLGE_CFG_SPEED_S, 3);
1901                 break;
1902         case HCLGE_MAC_SPEED_50G:
1903                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1904                                 HCLGE_CFG_SPEED_S, 4);
1905                 break;
1906         case HCLGE_MAC_SPEED_100G:
1907                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1908                                 HCLGE_CFG_SPEED_S, 5);
1909                 break;
1910         default:
1911                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1912                 return -EINVAL;
1913         }
1914
1915         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1916                       1);
1917
1918         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1919         if (ret) {
1920                 dev_err(&hdev->pdev->dev,
1921                         "mac speed/duplex config cmd failed %d.\n", ret);
1922                 return ret;
1923         }
1924
1925         return 0;
1926 }
1927
1928 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1929 {
1930         int ret;
1931
1932         duplex = hclge_check_speed_dup(duplex, speed);
1933         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1934                 return 0;
1935
1936         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1937         if (ret)
1938                 return ret;
1939
1940         hdev->hw.mac.speed = speed;
1941         hdev->hw.mac.duplex = duplex;
1942
1943         return 0;
1944 }
1945
1946 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1947                                      u8 duplex)
1948 {
1949         struct hclge_vport *vport = hclge_get_vport(handle);
1950         struct hclge_dev *hdev = vport->back;
1951
1952         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1953 }
1954
1955 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1956 {
1957         struct hclge_config_auto_neg_cmd *req;
1958         struct hclge_desc desc;
1959         u32 flag = 0;
1960         int ret;
1961
1962         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1963
1964         req = (struct hclge_config_auto_neg_cmd *)desc.data;
1965         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1966         req->cfg_an_cmd_flag = cpu_to_le32(flag);
1967
1968         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1969         if (ret)
1970                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1971                         ret);
1972
1973         return ret;
1974 }
1975
1976 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1977 {
1978         struct hclge_vport *vport = hclge_get_vport(handle);
1979         struct hclge_dev *hdev = vport->back;
1980
1981         return hclge_set_autoneg_en(hdev, enable);
1982 }
1983
1984 static int hclge_get_autoneg(struct hnae3_handle *handle)
1985 {
1986         struct hclge_vport *vport = hclge_get_vport(handle);
1987         struct hclge_dev *hdev = vport->back;
1988         struct phy_device *phydev = hdev->hw.mac.phydev;
1989
1990         if (phydev)
1991                 return phydev->autoneg;
1992
1993         return hdev->hw.mac.autoneg;
1994 }
1995
1996 static int hclge_mac_init(struct hclge_dev *hdev)
1997 {
1998         struct hclge_mac *mac = &hdev->hw.mac;
1999         int ret;
2000
2001         hdev->support_sfp_query = true;
2002         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2003         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2004                                          hdev->hw.mac.duplex);
2005         if (ret) {
2006                 dev_err(&hdev->pdev->dev,
2007                         "Config mac speed dup fail ret=%d\n", ret);
2008                 return ret;
2009         }
2010
2011         mac->link = 0;
2012
2013         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2014         if (ret) {
2015                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2016                 return ret;
2017         }
2018
2019         ret = hclge_buffer_alloc(hdev);
2020         if (ret)
2021                 dev_err(&hdev->pdev->dev,
2022                         "allocate buffer fail, ret=%d\n", ret);
2023
2024         return ret;
2025 }
2026
2027 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2028 {
2029         if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2030                 schedule_work(&hdev->mbx_service_task);
2031 }
2032
2033 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2034 {
2035         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2036                 schedule_work(&hdev->rst_service_task);
2037 }
2038
2039 static void hclge_task_schedule(struct hclge_dev *hdev)
2040 {
2041         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2042             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2043             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2044                 (void)schedule_work(&hdev->service_task);
2045 }
2046
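/* Query the MAC link status from firmware, return 1 if the link is up,
 * 0 if it is down, or a negative value if the command fails.
 */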
2047 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2048 {
2049         struct hclge_link_status_cmd *req;
2050         struct hclge_desc desc;
2051         int link_status;
2052         int ret;
2053
2054         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2055         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2058                         ret);
2059                 return ret;
2060         }
2061
2062         req = (struct hclge_link_status_cmd *)desc.data;
2063         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2064
2065         return !!link_status;
2066 }
2067
2068 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2069 {
2070         int mac_state;
2071         int link_stat;
2072
2073         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2074                 return 0;
2075
2076         mac_state = hclge_get_mac_link_status(hdev);
2077
2078         if (hdev->hw.mac.phydev) {
2079                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2080                         link_stat = mac_state &
2081                                 hdev->hw.mac.phydev->link;
2082                 else
2083                         link_stat = 0;
2084
2085         } else {
2086                 link_stat = mac_state;
2087         }
2088
2089         return !!link_stat;
2090 }
2091
2092 static void hclge_update_link_status(struct hclge_dev *hdev)
2093 {
2094         struct hnae3_client *client = hdev->nic_client;
2095         struct hnae3_handle *handle;
2096         int state;
2097         int i;
2098
2099         if (!client)
2100                 return;
2101         state = hclge_get_mac_phy_link(hdev);
2102         if (state != hdev->hw.mac.link) {
2103                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2104                         handle = &hdev->vport[i].nic;
2105                         client->ops->link_status_change(handle, state);
2106                 }
2107                 hdev->hw.mac.link = state;
2108         }
2109 }
2110
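/* Query the current SFP speed from the IMP firmware */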
2111 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2112 {
2113         struct hclge_sfp_speed_cmd *resp = NULL;
2114         struct hclge_desc desc;
2115         int ret;
2116
2117         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2118         resp = (struct hclge_sfp_speed_cmd *)desc.data;
2119         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2120         if (ret == -EOPNOTSUPP) {
2121                 dev_warn(&hdev->pdev->dev,
2122                          "IMP does not support getting SFP speed %d\n", ret);
2123                 return ret;
2124         } else if (ret) {
2125                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2126                 return ret;
2127         }
2128
2129         *speed = resp->sfp_speed;
2130
2131         return 0;
2132 }
2133
2134 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2135 {
2136         struct hclge_mac mac = hdev->hw.mac;
2137         int speed;
2138         int ret;
2139
2140         /* get the speed from SFP cmd when phy
2141          * doesn't exist.
2142          */
2143         if (mac.phydev)
2144                 return 0;
2145
2146         /* if IMP does not support getting SFP/qSFP speed, return directly */
2147         if (!hdev->support_sfp_query)
2148                 return 0;
2149
2150         ret = hclge_get_sfp_speed(hdev, &speed);
2151         if (ret == -EOPNOTSUPP) {
2152                 hdev->support_sfp_query = false;
2153                 return ret;
2154         } else if (ret) {
2155                 return ret;
2156         }
2157
2158         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2159                 return 0; /* do nothing if no SFP */
2160
2161         /* must config full duplex for SFP */
2162         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2163 }
2164
2165 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2166 {
2167         struct hclge_vport *vport = hclge_get_vport(handle);
2168         struct hclge_dev *hdev = vport->back;
2169
2170         return hclge_update_speed_duplex(hdev);
2171 }
2172
2173 static int hclge_get_status(struct hnae3_handle *handle)
2174 {
2175         struct hclge_vport *vport = hclge_get_vport(handle);
2176         struct hclge_dev *hdev = vport->back;
2177
2178         hclge_update_link_status(hdev);
2179
2180         return hdev->hw.mac.link;
2181 }
2182
2183 static void hclge_service_timer(struct timer_list *t)
2184 {
2185         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2186
2187         mod_timer(&hdev->service_timer, jiffies + HZ);
2188         hdev->hw_stats.stats_timer++;
2189         hclge_task_schedule(hdev);
2190 }
2191
2192 static void hclge_service_complete(struct hclge_dev *hdev)
2193 {
2194         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2195
2196         /* Flush memory before next watchdog */
2197         smp_mb__before_atomic();
2198         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2199 }
2200
2201 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2202 {
2203         u32 rst_src_reg;
2204         u32 cmdq_src_reg;
2205
2206         /* fetch the events from their corresponding regs */
2207         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2208         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2209
2210         /* Assumption: If by any chance reset and mailbox events are reported
2211          * together then we will only process reset event in this go and will
2212          * defer the processing of the mailbox events. Since we would not have
2213          * cleared the RX CMDQ event this time, we would receive another
2214          * interrupt from H/W just for the mailbox.
2215          */
2216
2217         /* check for vector0 reset event sources */
2218         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2219                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2220                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2221                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2222                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2223                 return HCLGE_VECTOR0_EVENT_RST;
2224         }
2225
2226         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2227                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2228                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2229                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2230                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2231                 return HCLGE_VECTOR0_EVENT_RST;
2232         }
2233
2234         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2235                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2236                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2237                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2238                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2239                 return HCLGE_VECTOR0_EVENT_RST;
2240         }
2241
2242         /* check for vector0 mailbox(=CMDQ RX) event source */
2243         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2244                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2245                 *clearval = cmdq_src_reg;
2246                 return HCLGE_VECTOR0_EVENT_MBX;
2247         }
2248
2249         return HCLGE_VECTOR0_EVENT_OTHER;
2250 }
2251
2252 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2253                                     u32 regclr)
2254 {
2255         switch (event_type) {
2256         case HCLGE_VECTOR0_EVENT_RST:
2257                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2258                 break;
2259         case HCLGE_VECTOR0_EVENT_MBX:
2260                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2261                 break;
2262         default:
2263                 break;
2264         }
2265 }
2266
2267 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2268 {
2269         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2270                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2271                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2272                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2273         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2274 }
2275
2276 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2277 {
2278         writel(enable ? 1 : 0, vector->addr);
2279 }
2280
2281 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2282 {
2283         struct hclge_dev *hdev = data;
2284         u32 event_cause;
2285         u32 clearval;
2286
2287         hclge_enable_vector(&hdev->misc_vector, false);
2288         event_cause = hclge_check_event_cause(hdev, &clearval);
2289
2290         /* vector 0 interrupt is shared with reset and mailbox source events. */
2291         switch (event_cause) {
2292         case HCLGE_VECTOR0_EVENT_RST:
2293                 hclge_reset_task_schedule(hdev);
2294                 break;
2295         case HCLGE_VECTOR0_EVENT_MBX:
2296                 /* If we are here then,
2297                  * 1. Either we are not handling any mbx task and we are not
2298                  *    scheduled as well
2299                  *                        OR
2300                  * 2. We could be handling a mbx task but nothing more is
2301                  *    scheduled.
2302                  * In both cases, we should schedule mbx task as there are more
2303                  * mbx messages reported by this interrupt.
2304                  */
2305                 hclge_mbx_task_schedule(hdev);
2306                 break;
2307         default:
2308                 dev_warn(&hdev->pdev->dev,
2309                          "received unknown or unhandled event of vector0\n");
2310                 break;
2311         }
2312
2313         /* clear the source of interrupt if it is not caused by reset */
2314         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2315                 hclge_clear_event_cause(hdev, event_cause, clearval);
2316                 hclge_enable_vector(&hdev->misc_vector, true);
2317         }
2318
2319         return IRQ_HANDLED;
2320 }
2321
2322 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2323 {
2324         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2325                 dev_warn(&hdev->pdev->dev,
2326                          "vector(vector_id %d) has been freed.\n", vector_id);
2327                 return;
2328         }
2329
2330         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2331         hdev->num_msi_left += 1;
2332         hdev->num_msi_used -= 1;
2333 }
2334
2335 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2336 {
2337         struct hclge_misc_vector *vector = &hdev->misc_vector;
2338
2339         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2340
2341         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2342         hdev->vector_status[0] = 0;
2343
2344         hdev->num_msi_left -= 1;
2345         hdev->num_msi_used += 1;
2346 }
2347
2348 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2349 {
2350         int ret;
2351
2352         hclge_get_misc_vector(hdev);
2353
2354         /* this would be explicitly freed in the end */
2355         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2356                           0, "hclge_misc", hdev);
2357         if (ret) {
2358                 hclge_free_vector(hdev, 0);
2359                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2360                         hdev->misc_vector.vector_irq);
2361         }
2362
2363         return ret;
2364 }
2365
2366 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2367 {
2368         free_irq(hdev->misc_vector.vector_irq, hdev);
2369         hclge_free_vector(hdev, 0);
2370 }
2371
2372 static int hclge_notify_client(struct hclge_dev *hdev,
2373                                enum hnae3_reset_notify_type type)
2374 {
2375         struct hnae3_client *client = hdev->nic_client;
2376         u16 i;
2377
2378         if (!client->ops->reset_notify)
2379                 return -EOPNOTSUPP;
2380
2381         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2382                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2383                 int ret;
2384
2385                 ret = client->ops->reset_notify(handle, type);
2386                 if (ret) {
2387                         dev_err(&hdev->pdev->dev,
2388                                 "notify nic client failed %d(%d)\n", type, ret);
2389                         return ret;
2390                 }
2391         }
2392
2393         return 0;
2394 }
2395
2396 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2397                                     enum hnae3_reset_notify_type type)
2398 {
2399         struct hnae3_client *client = hdev->roce_client;
2400         int ret = 0;
2401         u16 i;
2402
2403         if (!client)
2404                 return 0;
2405
2406         if (!client->ops->reset_notify)
2407                 return -EOPNOTSUPP;
2408
2409         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2410                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2411
2412                 ret = client->ops->reset_notify(handle, type);
2413                 if (ret) {
2414                         dev_err(&hdev->pdev->dev,
2415                                 "notify roce client failed %d(%d)\n",
2416                                 type, ret);
2417                         return ret;
2418                 }
2419         }
2420
2421         return ret;
2422 }
2423
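/* Poll the hardware until the reset indicated by hdev->reset_type has
 * completed, return -EBUSY on timeout.
 */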
2424 static int hclge_reset_wait(struct hclge_dev *hdev)
2425 {
2426 #define HCLGE_RESET_WAIT_MS     100
2427 #define HCLGE_RESET_WAIT_CNT    200
2428         u32 val, reg, reg_bit;
2429         u32 cnt = 0;
2430
2431         switch (hdev->reset_type) {
2432         case HNAE3_IMP_RESET:
2433                 reg = HCLGE_GLOBAL_RESET_REG;
2434                 reg_bit = HCLGE_IMP_RESET_BIT;
2435                 break;
2436         case HNAE3_GLOBAL_RESET:
2437                 reg = HCLGE_GLOBAL_RESET_REG;
2438                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2439                 break;
2440         case HNAE3_CORE_RESET:
2441                 reg = HCLGE_GLOBAL_RESET_REG;
2442                 reg_bit = HCLGE_CORE_RESET_BIT;
2443                 break;
2444         case HNAE3_FUNC_RESET:
2445                 reg = HCLGE_FUN_RST_ING;
2446                 reg_bit = HCLGE_FUN_RST_ING_B;
2447                 break;
2448         case HNAE3_FLR_RESET:
2449                 break;
2450         default:
2451                 dev_err(&hdev->pdev->dev,
2452                         "Wait for unsupported reset type: %d\n",
2453                         hdev->reset_type);
2454                 return -EINVAL;
2455         }
2456
2457         if (hdev->reset_type == HNAE3_FLR_RESET) {
2458                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2459                        cnt++ < HCLGE_RESET_WAIT_CNT)
2460                         msleep(HCLGE_RESET_WAIT_MS);
2461
2462                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2463                         dev_err(&hdev->pdev->dev,
2464                                 "flr wait timeout: %d\n", cnt);
2465                         return -EBUSY;
2466                 }
2467
2468                 return 0;
2469         }
2470
2471         val = hclge_read_dev(&hdev->hw, reg);
2472         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2473                 msleep(HCLGE_RESET_WAIT_MS);
2474                 val = hclge_read_dev(&hdev->hw, reg);
2475                 cnt++;
2476         }
2477
2478         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2479                 dev_warn(&hdev->pdev->dev,
2480                          "Wait for reset timeout: %d\n", hdev->reset_type);
2481                 return -EBUSY;
2482         }
2483
2484         return 0;
2485 }
2486
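/* Send a command to firmware to set or clear the reset status of a VF */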
2487 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2488 {
2489         struct hclge_vf_rst_cmd *req;
2490         struct hclge_desc desc;
2491
2492         req = (struct hclge_vf_rst_cmd *)desc.data;
2493         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2494         req->dest_vfid = func_id;
2495
2496         if (reset)
2497                 req->vf_rst = 0x1;
2498
2499         return hclge_cmd_send(&hdev->hw, &desc, 1);
2500 }
2501
2502 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2503 {
2504         int i;
2505
2506         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2507                 struct hclge_vport *vport = &hdev->vport[i];
2508                 int ret;
2509
2510                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2511                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2512                 if (ret) {
2513                         dev_err(&hdev->pdev->dev,
2514                                 "set vf(%d) rst failed %d!\n",
2515                                 vport->vport_id, ret);
2516                         return ret;
2517                 }
2518
2519                 if (!reset)
2520                         continue;
2521
2522                 /* Inform VF to process the reset.
2523                  * hclge_inform_reset_assert_to_vf may fail if VF
2524                  * driver is not loaded.
2525                  */
2526                 ret = hclge_inform_reset_assert_to_vf(vport);
2527                 if (ret)
2528                         dev_warn(&hdev->pdev->dev,
2529                                  "inform reset to vf(%d) failed %d!\n",
2530                                  vport->vport_id, ret);
2531         }
2532
2533         return 0;
2534 }
2535
2536 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2537 {
2538         struct hclge_desc desc;
2539         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2540         int ret;
2541
2542         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2543         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2544         req->fun_reset_vfid = func_id;
2545
2546         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547         if (ret)
2548                 dev_err(&hdev->pdev->dev,
2549                         "send function reset cmd fail, status = %d\n", ret);
2550
2551         return ret;
2552 }
2553
2554 static void hclge_do_reset(struct hclge_dev *hdev)
2555 {
2556         struct pci_dev *pdev = hdev->pdev;
2557         u32 val;
2558
2559         switch (hdev->reset_type) {
2560         case HNAE3_GLOBAL_RESET:
2561                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2562                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2563                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2564                 dev_info(&pdev->dev, "Global Reset requested\n");
2565                 break;
2566         case HNAE3_CORE_RESET:
2567                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2568                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2569                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2570                 dev_info(&pdev->dev, "Core Reset requested\n");
2571                 break;
2572         case HNAE3_FUNC_RESET:
2573                 dev_info(&pdev->dev, "PF Reset requested\n");
2574                 /* schedule again to check later */
2575                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2576                 hclge_reset_task_schedule(hdev);
2577                 break;
2578         case HNAE3_FLR_RESET:
2579                 dev_info(&pdev->dev, "FLR requested\n");
2580                 /* schedule again to check later */
2581                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2582                 hclge_reset_task_schedule(hdev);
2583                 break;
2584         default:
2585                 dev_warn(&pdev->dev,
2586                          "Unsupported reset type: %d\n", hdev->reset_type);
2587                 break;
2588         }
2589 }
2590
2591 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2592                                                    unsigned long *addr)
2593 {
2594         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2595
2596         /* return the highest priority reset level amongst all */
2597         if (test_bit(HNAE3_IMP_RESET, addr)) {
2598                 rst_level = HNAE3_IMP_RESET;
2599                 clear_bit(HNAE3_IMP_RESET, addr);
2600                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2601                 clear_bit(HNAE3_CORE_RESET, addr);
2602                 clear_bit(HNAE3_FUNC_RESET, addr);
2603         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2604                 rst_level = HNAE3_GLOBAL_RESET;
2605                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2606                 clear_bit(HNAE3_CORE_RESET, addr);
2607                 clear_bit(HNAE3_FUNC_RESET, addr);
2608         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2609                 rst_level = HNAE3_CORE_RESET;
2610                 clear_bit(HNAE3_CORE_RESET, addr);
2611                 clear_bit(HNAE3_FUNC_RESET, addr);
2612         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2613                 rst_level = HNAE3_FUNC_RESET;
2614                 clear_bit(HNAE3_FUNC_RESET, addr);
2615         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2616                 rst_level = HNAE3_FLR_RESET;
2617                 clear_bit(HNAE3_FLR_RESET, addr);
2618         }
2619
2620         return rst_level;
2621 }
2622
2623 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2624 {
2625         u32 clearval = 0;
2626
2627         switch (hdev->reset_type) {
2628         case HNAE3_IMP_RESET:
2629                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2630                 break;
2631         case HNAE3_GLOBAL_RESET:
2632                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2633                 break;
2634         case HNAE3_CORE_RESET:
2635                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2636                 break;
2637         default:
2638                 break;
2639         }
2640
2641         if (!clearval)
2642                 return;
2643
2644         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2645         hclge_enable_vector(&hdev->misc_vector, true);
2646 }
2647
2648 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2649 {
2650         int ret = 0;
2651
2652         switch (hdev->reset_type) {
2653         case HNAE3_FUNC_RESET:
2654                 /* fall through */
2655         case HNAE3_FLR_RESET:
2656                 ret = hclge_set_all_vf_rst(hdev, true);
2657                 break;
2658         default:
2659                 break;
2660         }
2661
2662         return ret;
2663 }
2664
2665 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2666 {
2667         u32 reg_val;
2668         int ret = 0;
2669
2670         switch (hdev->reset_type) {
2671         case HNAE3_FUNC_RESET:
2672                 /* There is no mechanism for PF to know if VF has stopped IO;
2673                  * for now, just wait 100 ms for VF to stop IO
2674                  */
2675                 msleep(100);
2676                 ret = hclge_func_reset_cmd(hdev, 0);
2677                 if (ret) {
2678                         dev_err(&hdev->pdev->dev,
2679                                 "asserting function reset fail %d!\n", ret);
2680                         return ret;
2681                 }
2682
2683                 /* After performing PF reset, it is not necessary to do the
2684                  * mailbox handling or send any command to firmware, because
2685                  * any mailbox handling or command to firmware is only valid
2686                  * after hclge_cmd_init is called.
2687                  */
2688                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2689                 break;
2690         case HNAE3_FLR_RESET:
2691                 /* There is no mechanism for PF to know if VF has stopped IO;
2692                  * for now, just wait 100 ms for VF to stop IO
2693                  */
2694                 msleep(100);
2695                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2696                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2697                 break;
2698         case HNAE3_IMP_RESET:
2699                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2700                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2701                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2702                 break;
2703         default:
2704                 break;
2705         }
2706
2707         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2708
2709         return ret;
2710 }
2711
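/* Handle a failed reset: return true if the reset task should be re-scheduled
 * to wait for a pending or timed out hardware reset, otherwise clear the reset
 * cause and, if retries remain, arm the reset timer to upgrade the reset level.
 */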
2712 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2713 {
2714 #define MAX_RESET_FAIL_CNT 5
2715 #define RESET_UPGRADE_DELAY_SEC 10
2716
2717         if (hdev->reset_pending) {
2718                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2719                          hdev->reset_pending);
2720                 return true;
2721         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2722                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2723                     BIT(HCLGE_IMP_RESET_BIT))) {
2724                 dev_info(&hdev->pdev->dev,
2725                          "reset failed because IMP Reset is pending\n");
2726                 hclge_clear_reset_cause(hdev);
2727                 return false;
2728         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2729                 hdev->reset_fail_cnt++;
2730                 if (is_timeout) {
2731                         set_bit(hdev->reset_type, &hdev->reset_pending);
2732                         dev_info(&hdev->pdev->dev,
2733                                  "re-schedule to wait for hw reset done\n");
2734                         return true;
2735                 }
2736
2737                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2738                 hclge_clear_reset_cause(hdev);
2739                 mod_timer(&hdev->reset_timer,
2740                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2741
2742                 return false;
2743         }
2744
2745         hclge_clear_reset_cause(hdev);
2746         dev_err(&hdev->pdev->dev, "Reset fail!\n");
2747         return false;
2748 }
2749
2750 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2751 {
2752         int ret = 0;
2753
2754         switch (hdev->reset_type) {
2755         case HNAE3_FUNC_RESET:
2756                 /* fall through */
2757         case HNAE3_FLR_RESET:
2758                 ret = hclge_set_all_vf_rst(hdev, false);
2759                 break;
2760         default:
2761                 break;
2762         }
2763
2764         return ret;
2765 }
2766
2767 static void hclge_reset(struct hclge_dev *hdev)
2768 {
2769         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2770         bool is_timeout = false;
2771         int ret;
2772
2773         /* Initialize ae_dev reset status as well, in case enet layer wants to
2774          * know if device is undergoing reset
2775          */
2776         ae_dev->reset_type = hdev->reset_type;
2777         hdev->reset_count++;
2778         hdev->last_reset_time = jiffies;
2779         /* perform reset of the stack & ae device for a client */
2780         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2781         if (ret)
2782                 goto err_reset;
2783
2784         ret = hclge_reset_prepare_down(hdev);
2785         if (ret)
2786                 goto err_reset;
2787
2788         rtnl_lock();
2789         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2790         if (ret)
2791                 goto err_reset_lock;
2792
2793         rtnl_unlock();
2794
2795         ret = hclge_reset_prepare_wait(hdev);
2796         if (ret)
2797                 goto err_reset;
2798
2799         if (hclge_reset_wait(hdev)) {
2800                 is_timeout = true;
2801                 goto err_reset;
2802         }
2803
2804         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2805         if (ret)
2806                 goto err_reset;
2807
2808         rtnl_lock();
2809         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2810         if (ret)
2811                 goto err_reset_lock;
2812
2813         ret = hclge_reset_ae_dev(hdev->ae_dev);
2814         if (ret)
2815                 goto err_reset_lock;
2816
2817         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2818         if (ret)
2819                 goto err_reset_lock;
2820
2821         hclge_clear_reset_cause(hdev);
2822
2823         ret = hclge_reset_prepare_up(hdev);
2824         if (ret)
2825                 goto err_reset_lock;
2826
2827         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2828         if (ret)
2829                 goto err_reset_lock;
2830
2831         rtnl_unlock();
2832
2833         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2834         if (ret)
2835                 goto err_reset;
2836
2837         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2838         if (ret)
2839                 goto err_reset;
2840
2841         return;
2842
2843 err_reset_lock:
2844         rtnl_unlock();
2845 err_reset:
2846         if (hclge_reset_err_handle(hdev, is_timeout))
2847                 hclge_reset_task_schedule(hdev);
2848 }
2849
2850 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2851 {
2852         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2853         struct hclge_dev *hdev = ae_dev->priv;
2854
2855         /* We might end up getting called broadly because of the two cases below:
2856          * 1. A recoverable error was conveyed through APEI and the only way to
2857          *    restore normalcy is to reset.
2858          * 2. A new reset request from the stack due to a timeout.
2859          *
2860          * For the first case, the error event might not have an ae handle
2861          * available. Check whether this is a new reset request and we are not
2862          * here just because the last reset attempt did not succeed and the
2863          * watchdog hit us again. We know the request is new if the last reset
2864          * did not occur very recently (watchdog timer = 5 * HZ, so check after a
2865          * sufficiently long time, say 4 * 5 * HZ). For a new request the "reset
2866          * level" is reset to PF reset. If it is a repeat of the most recent
2867          * request, throttle it: another reset is not allowed before 3 * HZ has
2868          * elapsed.
2869          */
2870         if (!handle)
2871                 handle = &hdev->vport[0].nic;
2872
2873         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2874                 return;
2875         else if (hdev->default_reset_request)
2876                 hdev->reset_level =
2877                         hclge_get_reset_level(hdev,
2878                                               &hdev->default_reset_request);
2879         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2880                 hdev->reset_level = HNAE3_FUNC_RESET;
2881
2882         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2883                  hdev->reset_level);
2884
2885         /* request reset & schedule reset task */
2886         set_bit(hdev->reset_level, &hdev->reset_request);
2887         hclge_reset_task_schedule(hdev);
2888
2889         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2890                 hdev->reset_level++;
2891 }
2892
2893 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2894                                         enum hnae3_reset_type rst_type)
2895 {
2896         struct hclge_dev *hdev = ae_dev->priv;
2897
2898         set_bit(rst_type, &hdev->default_reset_request);
2899 }
2900
2901 static void hclge_reset_timer(struct timer_list *t)
2902 {
2903         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2904
2905         dev_info(&hdev->pdev->dev,
2906                  "triggering global reset in reset timer\n");
2907         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2908         hclge_reset_event(hdev->pdev, NULL);
2909 }
2910
2911 static void hclge_reset_subtask(struct hclge_dev *hdev)
2912 {
2913         /* Check if there is any ongoing reset in the hardware. This status can
2914          * be checked from reset_pending. If there is one, we need to wait for
2915          * the hardware to complete the reset:
2916          *    a. If we can determine within a reasonable time that the hardware
2917          *       has fully reset, we can proceed with the driver and client
2918          *       reset.
2919          *    b. Otherwise, come back later to check this status, so reschedule
2920          *       now.
2921          */
2922         hdev->last_reset_time = jiffies;
2923         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2924         if (hdev->reset_type != HNAE3_NONE_RESET)
2925                 hclge_reset(hdev);
2926
2927         /* check if we got any *new* reset requests to be honored */
2928         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2929         if (hdev->reset_type != HNAE3_NONE_RESET)
2930                 hclge_do_reset(hdev);
2931
2932         hdev->reset_type = HNAE3_NONE_RESET;
2933 }
2934
2935 static void hclge_reset_service_task(struct work_struct *work)
2936 {
2937         struct hclge_dev *hdev =
2938                 container_of(work, struct hclge_dev, rst_service_task);
2939
2940         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2941                 return;
2942
2943         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2944
2945         hclge_reset_subtask(hdev);
2946
2947         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2948 }
2949
2950 static void hclge_mailbox_service_task(struct work_struct *work)
2951 {
2952         struct hclge_dev *hdev =
2953                 container_of(work, struct hclge_dev, mbx_service_task);
2954
2955         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2956                 return;
2957
2958         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2959
2960         hclge_mbx_handler(hdev);
2961
2962         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2963 }
2964
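/* Clear the ALIVE state of any VF vport that has not been active for more
 * than 8 * HZ and restore its MPS to the default frame size. Vport 0 (the
 * PF) is skipped because it is always alive.
 */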
2965 static void hclge_update_vport_alive(struct hclge_dev *hdev)
2966 {
2967         int i;
2968
2969         /* start from vport 1, since vport 0 (the PF) is always alive */
2970         for (i = 1; i < hdev->num_alloc_vport; i++) {
2971                 struct hclge_vport *vport = &hdev->vport[i];
2972
2973                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
2974                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
2975
2976                 /* If vf is not alive, set to default value */
2977                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2978                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
2979         }
2980 }
2981
2982 static void hclge_service_task(struct work_struct *work)
2983 {
2984         struct hclge_dev *hdev =
2985                 container_of(work, struct hclge_dev, service_task);
2986
2987         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2988                 hclge_update_stats_for_all(hdev);
2989                 hdev->hw_stats.stats_timer = 0;
2990         }
2991
2992         hclge_update_speed_duplex(hdev);
2993         hclge_update_link_status(hdev);
2994         hclge_update_vport_alive(hdev);
2995         hclge_service_complete(hdev);
2996 }
2997
2998 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2999 {
3000         /* VF handle has no client */
3001         if (!handle->client)
3002                 return container_of(handle, struct hclge_vport, nic);
3003         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3004                 return container_of(handle, struct hclge_vport, roce);
3005         else
3006                 return container_of(handle, struct hclge_vport, nic);
3007 }
3008
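/* Allocate up to vector_num unused MSI-X vectors for this vport. The search
 * starts from index 1 so vector 0 is never handed out; for each vector the
 * IRQ number and per-vport interrupt register address are filled into
 * vector_info. Returns the number of vectors actually allocated.
 */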
3009 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3010                             struct hnae3_vector_info *vector_info)
3011 {
3012         struct hclge_vport *vport = hclge_get_vport(handle);
3013         struct hnae3_vector_info *vector = vector_info;
3014         struct hclge_dev *hdev = vport->back;
3015         int alloc = 0;
3016         int i, j;
3017
3018         vector_num = min(hdev->num_msi_left, vector_num);
3019
3020         for (j = 0; j < vector_num; j++) {
3021                 for (i = 1; i < hdev->num_msi; i++) {
3022                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3023                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3024                                 vector->io_addr = hdev->hw.io_base +
3025                                         HCLGE_VECTOR_REG_BASE +
3026                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3027                                         vport->vport_id *
3028                                         HCLGE_VECTOR_VF_OFFSET;
3029                                 hdev->vector_status[i] = vport->vport_id;
3030                                 hdev->vector_irq[i] = vector->vector;
3031
3032                                 vector++;
3033                                 alloc++;
3034
3035                                 break;
3036                         }
3037                 }
3038         }
3039         hdev->num_msi_left -= alloc;
3040         hdev->num_msi_used += alloc;
3041
3042         return alloc;
3043 }
3044
3045 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3046 {
3047         int i;
3048
3049         for (i = 0; i < hdev->num_msi; i++)
3050                 if (vector == hdev->vector_irq[i])
3051                         return i;
3052
3053         return -EINVAL;
3054 }
3055
3056 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3057 {
3058         struct hclge_vport *vport = hclge_get_vport(handle);
3059         struct hclge_dev *hdev = vport->back;
3060         int vector_id;
3061
3062         vector_id = hclge_get_vector_index(hdev, vector);
3063         if (vector_id < 0) {
3064                 dev_err(&hdev->pdev->dev,
3065                         "Get vector index fail. vector_id =%d\n", vector_id);
3066                 return vector_id;
3067         }
3068
3069         hclge_free_vector(hdev, vector_id);
3070
3071         return 0;
3072 }
3073
3074 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3075 {
3076         return HCLGE_RSS_KEY_SIZE;
3077 }
3078
3079 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3080 {
3081         return HCLGE_RSS_IND_TBL_SIZE;
3082 }
3083
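/* Program the RSS hash algorithm and hash key. The key is written with three
 * commands of HCLGE_RSS_HASH_KEY_NUM bytes each; the last command carries the
 * remaining bytes of the HCLGE_RSS_KEY_SIZE byte key.
 */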
3084 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3085                                   const u8 hfunc, const u8 *key)
3086 {
3087         struct hclge_rss_config_cmd *req;
3088         struct hclge_desc desc;
3089         int key_offset;
3090         int key_size;
3091         int ret;
3092
3093         req = (struct hclge_rss_config_cmd *)desc.data;
3094
3095         for (key_offset = 0; key_offset < 3; key_offset++) {
3096                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3097                                            false);
3098
3099                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3100                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3101
3102                 if (key_offset == 2)
3103                         key_size =
3104                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3105                 else
3106                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3107
3108                 memcpy(req->hash_key,
3109                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3110
3111                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3112                 if (ret) {
3113                         dev_err(&hdev->pdev->dev,
3114                                 "Configure RSS config fail, status = %d\n",
3115                                 ret);
3116                         return ret;
3117                 }
3118         }
3119         return 0;
3120 }
3121
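/* Write the RSS indirection table to hardware, one command per block of
 * HCLGE_RSS_CFG_TBL_SIZE entries.
 */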
3122 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3123 {
3124         struct hclge_rss_indirection_table_cmd *req;
3125         struct hclge_desc desc;
3126         int i, j;
3127         int ret;
3128
3129         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3130
3131         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3132                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3133                                            false);
3134
3135                 req->start_table_index =
3136                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3137                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3138
3139                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3140                         req->rss_result[j] =
3141                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3142
3143                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3144                 if (ret) {
3145                         dev_err(&hdev->pdev->dev,
3146                                 "Configure rss indir table fail,status = %d\n",
3147                                 ret);
3148                         return ret;
3149                 }
3150         }
3151         return 0;
3152 }
3153
3154 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3155                                  u16 *tc_size, u16 *tc_offset)
3156 {
3157         struct hclge_rss_tc_mode_cmd *req;
3158         struct hclge_desc desc;
3159         int ret;
3160         int i;
3161
3162         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3163         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3164
3165         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3166                 u16 mode = 0;
3167
3168                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3169                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3170                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3171                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3172                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3173
3174                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3175         }
3176
3177         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3178         if (ret)
3179                 dev_err(&hdev->pdev->dev,
3180                         "Configure rss tc mode fail, status = %d\n", ret);
3181
3182         return ret;
3183 }
3184
3185 static void hclge_get_rss_type(struct hclge_vport *vport)
3186 {
3187         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3188             vport->rss_tuple_sets.ipv4_udp_en ||
3189             vport->rss_tuple_sets.ipv4_sctp_en ||
3190             vport->rss_tuple_sets.ipv6_tcp_en ||
3191             vport->rss_tuple_sets.ipv6_udp_en ||
3192             vport->rss_tuple_sets.ipv6_sctp_en)
3193                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3194         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3195                  vport->rss_tuple_sets.ipv6_fragment_en)
3196                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3197         else
3198                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3199 }
3200
3201 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3202 {
3203         struct hclge_rss_input_tuple_cmd *req;
3204         struct hclge_desc desc;
3205         int ret;
3206
3207         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3208
3209         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3210
3211         /* Get the tuple cfg from pf */
3212         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3213         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3214         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3215         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3216         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3217         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3218         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3219         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3220         hclge_get_rss_type(&hdev->vport[0]);
3221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3222         if (ret)
3223                 dev_err(&hdev->pdev->dev,
3224                         "Configure rss input fail, status = %d\n", ret);
3225         return ret;
3226 }
3227
3228 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3229                          u8 *key, u8 *hfunc)
3230 {
3231         struct hclge_vport *vport = hclge_get_vport(handle);
3232         int i;
3233
3234         /* Get hash algorithm */
3235         if (hfunc) {
3236                 switch (vport->rss_algo) {
3237                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3238                         *hfunc = ETH_RSS_HASH_TOP;
3239                         break;
3240                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3241                         *hfunc = ETH_RSS_HASH_XOR;
3242                         break;
3243                 default:
3244                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3245                         break;
3246                 }
3247         }
3248
3249         /* Get the RSS Key required by the user */
3250         if (key)
3251                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3252
3253         /* Get indirect table */
3254         if (indir)
3255                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3256                         indir[i] =  vport->rss_indirection_tbl[i];
3257
3258         return 0;
3259 }
3260
3261 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3262                          const  u8 *key, const  u8 hfunc)
3263 {
3264         struct hclge_vport *vport = hclge_get_vport(handle);
3265         struct hclge_dev *hdev = vport->back;
3266         u8 hash_algo;
3267         int ret, i;
3268
3269         /* Set the RSS Hash Key if specified by the user */
3270         if (key) {
3271                 switch (hfunc) {
3272                 case ETH_RSS_HASH_TOP:
3273                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3274                         break;
3275                 case ETH_RSS_HASH_XOR:
3276                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3277                         break;
3278                 case ETH_RSS_HASH_NO_CHANGE:
3279                         hash_algo = vport->rss_algo;
3280                         break;
3281                 default:
3282                         return -EINVAL;
3283                 }
3284
3285                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3286                 if (ret)
3287                         return ret;
3288
3289                 /* Update the shadow RSS key with the user specified key */
3290                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3291                 vport->rss_algo = hash_algo;
3292         }
3293
3294         /* Update the shadow RSS table with user specified qids */
3295         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3296                 vport->rss_indirection_tbl[i] = indir[i];
3297
3298         /* Update the hardware */
3299         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3300 }
3301
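/* Translate the ethtool RXH_* flags in nfc->data into the hardware hash
 * tuple bits. SCTP flows additionally enable hashing on the verification
 * tag (HCLGE_V_TAG_BIT).
 */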
3302 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3303 {
3304         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3305
3306         if (nfc->data & RXH_L4_B_2_3)
3307                 hash_sets |= HCLGE_D_PORT_BIT;
3308         else
3309                 hash_sets &= ~HCLGE_D_PORT_BIT;
3310
3311         if (nfc->data & RXH_IP_SRC)
3312                 hash_sets |= HCLGE_S_IP_BIT;
3313         else
3314                 hash_sets &= ~HCLGE_S_IP_BIT;
3315
3316         if (nfc->data & RXH_IP_DST)
3317                 hash_sets |= HCLGE_D_IP_BIT;
3318         else
3319                 hash_sets &= ~HCLGE_D_IP_BIT;
3320
3321         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3322                 hash_sets |= HCLGE_V_TAG_BIT;
3323
3324         return hash_sets;
3325 }
3326
3327 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3328                                struct ethtool_rxnfc *nfc)
3329 {
3330         struct hclge_vport *vport = hclge_get_vport(handle);
3331         struct hclge_dev *hdev = vport->back;
3332         struct hclge_rss_input_tuple_cmd *req;
3333         struct hclge_desc desc;
3334         u8 tuple_sets;
3335         int ret;
3336
3337         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3338                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3339                 return -EINVAL;
3340
3341         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3343
3344         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3345         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3346         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3347         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3348         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3349         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3350         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3351         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3352
3353         tuple_sets = hclge_get_rss_hash_bits(nfc);
3354         switch (nfc->flow_type) {
3355         case TCP_V4_FLOW:
3356                 req->ipv4_tcp_en = tuple_sets;
3357                 break;
3358         case TCP_V6_FLOW:
3359                 req->ipv6_tcp_en = tuple_sets;
3360                 break;
3361         case UDP_V4_FLOW:
3362                 req->ipv4_udp_en = tuple_sets;
3363                 break;
3364         case UDP_V6_FLOW:
3365                 req->ipv6_udp_en = tuple_sets;
3366                 break;
3367         case SCTP_V4_FLOW:
3368                 req->ipv4_sctp_en = tuple_sets;
3369                 break;
3370         case SCTP_V6_FLOW:
3371                 if ((nfc->data & RXH_L4_B_0_1) ||
3372                     (nfc->data & RXH_L4_B_2_3))
3373                         return -EINVAL;
3374
3375                 req->ipv6_sctp_en = tuple_sets;
3376                 break;
3377         case IPV4_FLOW:
3378                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3379                 break;
3380         case IPV6_FLOW:
3381                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3382                 break;
3383         default:
3384                 return -EINVAL;
3385         }
3386
3387         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3388         if (ret) {
3389                 dev_err(&hdev->pdev->dev,
3390                         "Set rss tuple fail, status = %d\n", ret);
3391                 return ret;
3392         }
3393
3394         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3395         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3396         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3397         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3398         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3399         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3400         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3401         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3402         hclge_get_rss_type(vport);
3403         return 0;
3404 }
3405
3406 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3407                                struct ethtool_rxnfc *nfc)
3408 {
3409         struct hclge_vport *vport = hclge_get_vport(handle);
3410         u8 tuple_sets;
3411
3412         nfc->data = 0;
3413
3414         switch (nfc->flow_type) {
3415         case TCP_V4_FLOW:
3416                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3417                 break;
3418         case UDP_V4_FLOW:
3419                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3420                 break;
3421         case TCP_V6_FLOW:
3422                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3423                 break;
3424         case UDP_V6_FLOW:
3425                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3426                 break;
3427         case SCTP_V4_FLOW:
3428                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3429                 break;
3430         case SCTP_V6_FLOW:
3431                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3432                 break;
3433         case IPV4_FLOW:
3434         case IPV6_FLOW:
3435                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3436                 break;
3437         default:
3438                 return -EINVAL;
3439         }
3440
3441         if (!tuple_sets)
3442                 return 0;
3443
3444         if (tuple_sets & HCLGE_D_PORT_BIT)
3445                 nfc->data |= RXH_L4_B_2_3;
3446         if (tuple_sets & HCLGE_S_PORT_BIT)
3447                 nfc->data |= RXH_L4_B_0_1;
3448         if (tuple_sets & HCLGE_D_IP_BIT)
3449                 nfc->data |= RXH_IP_DST;
3450         if (tuple_sets & HCLGE_S_IP_BIT)
3451                 nfc->data |= RXH_IP_SRC;
3452
3453         return 0;
3454 }
3455
3456 static int hclge_get_tc_size(struct hnae3_handle *handle)
3457 {
3458         struct hclge_vport *vport = hclge_get_vport(handle);
3459         struct hclge_dev *hdev = vport->back;
3460
3461         return hdev->rss_size_max;
3462 }
3463
3464 int hclge_rss_init_hw(struct hclge_dev *hdev)
3465 {
3466         struct hclge_vport *vport = hdev->vport;
3467         u8 *rss_indir = vport[0].rss_indirection_tbl;
3468         u16 rss_size = vport[0].alloc_rss_size;
3469         u8 *key = vport[0].rss_hash_key;
3470         u8 hfunc = vport[0].rss_algo;
3471         u16 tc_offset[HCLGE_MAX_TC_NUM];
3472         u16 tc_valid[HCLGE_MAX_TC_NUM];
3473         u16 tc_size[HCLGE_MAX_TC_NUM];
3474         u16 roundup_size;
3475         int i, ret;
3476
3477         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3478         if (ret)
3479                 return ret;
3480
3481         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3482         if (ret)
3483                 return ret;
3484
3485         ret = hclge_set_rss_input_tuple(hdev);
3486         if (ret)
3487                 return ret;
3488
3489         /* Each TC has the same queue size, and the tc_size written to hardware
3490          * is the log2 of the roundup-power-of-two of rss_size; the actual queue
3491          * size is limited by the indirection table.
3492          */
3493         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3494                 dev_err(&hdev->pdev->dev,
3495                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3496                         rss_size);
3497                 return -EINVAL;
3498         }
3499
3500         roundup_size = roundup_pow_of_two(rss_size);
3501         roundup_size = ilog2(roundup_size);
3502
3503         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3504                 tc_valid[i] = 0;
3505
3506                 if (!(hdev->hw_tc_map & BIT(i)))
3507                         continue;
3508
3509                 tc_valid[i] = 1;
3510                 tc_size[i] = roundup_size;
3511                 tc_offset[i] = rss_size * i;
3512         }
3513
3514         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3515 }
3516
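/* Initialize each vport's shadow RSS indirection table so that entries are
 * spread round-robin across the vport's allocated RSS queues.
 */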
3517 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3518 {
3519         struct hclge_vport *vport = hdev->vport;
3520         int i, j;
3521
3522         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3523                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3524                         vport[j].rss_indirection_tbl[i] =
3525                                 i % vport[j].alloc_rss_size;
3526         }
3527 }
3528
3529 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3530 {
3531         struct hclge_vport *vport = hdev->vport;
3532         int i;
3533
3534         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3535                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3536                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3537                 vport[i].rss_tuple_sets.ipv4_udp_en =
3538                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3539                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3540                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3541                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3542                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3543                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3544                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3545                 vport[i].rss_tuple_sets.ipv6_udp_en =
3546                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3547                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3548                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3549                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3550                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3551
3552                 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3553
3554                 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3555         }
3556
3557         hclge_rss_indir_init_cfg(hdev);
3558 }
3559
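/* Map (en == true) or unmap a chain of TQP rings to/from the given vector.
 * Ring entries are packed into the command descriptor up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD at a time, with a final command sent for any
 * remainder.
 */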
3560 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3561                                 int vector_id, bool en,
3562                                 struct hnae3_ring_chain_node *ring_chain)
3563 {
3564         struct hclge_dev *hdev = vport->back;
3565         struct hnae3_ring_chain_node *node;
3566         struct hclge_desc desc;
3567         struct hclge_ctrl_vector_chain_cmd *req
3568                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3569         enum hclge_cmd_status status;
3570         enum hclge_opcode_type op;
3571         u16 tqp_type_and_id;
3572         int i;
3573
3574         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3575         hclge_cmd_setup_basic_desc(&desc, op, false);
3576         req->int_vector_id = vector_id;
3577
3578         i = 0;
3579         for (node = ring_chain; node; node = node->next) {
3580                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3581                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3582                                 HCLGE_INT_TYPE_S,
3583                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3584                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3585                                 HCLGE_TQP_ID_S, node->tqp_index);
3586                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3587                                 HCLGE_INT_GL_IDX_S,
3588                                 hnae3_get_field(node->int_gl_idx,
3589                                                 HNAE3_RING_GL_IDX_M,
3590                                                 HNAE3_RING_GL_IDX_S));
3591                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3592                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3593                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3594                         req->vfid = vport->vport_id;
3595
3596                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3597                         if (status) {
3598                                 dev_err(&hdev->pdev->dev,
3599                                         "Map TQP fail, status is %d.\n",
3600                                         status);
3601                                 return -EIO;
3602                         }
3603                         i = 0;
3604
3605                         hclge_cmd_setup_basic_desc(&desc,
3606                                                    op,
3607                                                    false);
3608                         req->int_vector_id = vector_id;
3609                 }
3610         }
3611
3612         if (i > 0) {
3613                 req->int_cause_num = i;
3614                 req->vfid = vport->vport_id;
3615                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3616                 if (status) {
3617                         dev_err(&hdev->pdev->dev,
3618                                 "Map TQP fail, status is %d.\n", status);
3619                         return -EIO;
3620                 }
3621         }
3622
3623         return 0;
3624 }
3625
3626 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3627                                     int vector,
3628                                     struct hnae3_ring_chain_node *ring_chain)
3629 {
3630         struct hclge_vport *vport = hclge_get_vport(handle);
3631         struct hclge_dev *hdev = vport->back;
3632         int vector_id;
3633
3634         vector_id = hclge_get_vector_index(hdev, vector);
3635         if (vector_id < 0) {
3636                 dev_err(&hdev->pdev->dev,
3637                         "Get vector index fail. vector_id =%d\n", vector_id);
3638                 return vector_id;
3639         }
3640
3641         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3642 }
3643
3644 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3645                                        int vector,
3646                                        struct hnae3_ring_chain_node *ring_chain)
3647 {
3648         struct hclge_vport *vport = hclge_get_vport(handle);
3649         struct hclge_dev *hdev = vport->back;
3650         int vector_id, ret;
3651
3652         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3653                 return 0;
3654
3655         vector_id = hclge_get_vector_index(hdev, vector);
3656         if (vector_id < 0) {
3657                 dev_err(&handle->pdev->dev,
3658                         "Get vector index fail. ret =%d\n", vector_id);
3659                 return vector_id;
3660         }
3661
3662         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3663         if (ret)
3664                 dev_err(&handle->pdev->dev,
3665                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3666                         vector_id,
3667                         ret);
3668
3669         return ret;
3670 }
3671
3672 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3673                                struct hclge_promisc_param *param)
3674 {
3675         struct hclge_promisc_cfg_cmd *req;
3676         struct hclge_desc desc;
3677         int ret;
3678
3679         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3680
3681         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3682         req->vf_id = param->vf_id;
3683
3684         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3685          * pdev revision (0x20); newer revisions support them. Setting these two
3686          * fields does not cause the firmware to return an error on
3687          * revision (0x20), so they are always set here.
3688          */
3689         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3690                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3691
3692         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3693         if (ret)
3694                 dev_err(&hdev->pdev->dev,
3695                         "Set promisc mode fail, status is %d.\n", ret);
3696
3697         return ret;
3698 }
3699
3700 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3701                               bool en_mc, bool en_bc, int vport_id)
3702 {
3703         if (!param)
3704                 return;
3705
3706         memset(param, 0, sizeof(struct hclge_promisc_param));
3707         if (en_uc)
3708                 param->enable = HCLGE_PROMISC_EN_UC;
3709         if (en_mc)
3710                 param->enable |= HCLGE_PROMISC_EN_MC;
3711         if (en_bc)
3712                 param->enable |= HCLGE_PROMISC_EN_BC;
3713         param->vf_id = vport_id;
3714 }
3715
3716 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3717                                   bool en_mc_pmc)
3718 {
3719         struct hclge_vport *vport = hclge_get_vport(handle);
3720         struct hclge_dev *hdev = vport->back;
3721         struct hclge_promisc_param param;
3722
3723         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3724                                  vport->vport_id);
3725         return hclge_cmd_set_promisc_mode(hdev, &param);
3726 }
3727
3728 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3729 {
3730         struct hclge_get_fd_mode_cmd *req;
3731         struct hclge_desc desc;
3732         int ret;
3733
3734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3735
3736         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3737
3738         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3739         if (ret) {
3740                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3741                 return ret;
3742         }
3743
3744         *fd_mode = req->mode;
3745
3746         return ret;
3747 }
3748
3749 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3750                                    u32 *stage1_entry_num,
3751                                    u32 *stage2_entry_num,
3752                                    u16 *stage1_counter_num,
3753                                    u16 *stage2_counter_num)
3754 {
3755         struct hclge_get_fd_allocation_cmd *req;
3756         struct hclge_desc desc;
3757         int ret;
3758
3759         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3760
3761         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3762
3763         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3764         if (ret) {
3765                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3766                         ret);
3767                 return ret;
3768         }
3769
3770         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3771         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3772         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3773         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3774
3775         return ret;
3776 }
3777
3778 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3779 {
3780         struct hclge_set_fd_key_config_cmd *req;
3781         struct hclge_fd_key_cfg *stage;
3782         struct hclge_desc desc;
3783         int ret;
3784
3785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3786
3787         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3788         stage = &hdev->fd_cfg.key_cfg[stage_num];
3789         req->stage = stage_num;
3790         req->key_select = stage->key_sel;
3791         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3792         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3793         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3794         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3795         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3796         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3797
3798         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3799         if (ret)
3800                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3801
3802         return ret;
3803 }
3804
3805 static int hclge_init_fd_config(struct hclge_dev *hdev)
3806 {
3807 #define LOW_2_WORDS             0x03
3808         struct hclge_fd_key_cfg *key_cfg;
3809         int ret;
3810
3811         if (!hnae3_dev_fd_supported(hdev))
3812                 return 0;
3813
3814         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3815         if (ret)
3816                 return ret;
3817
3818         switch (hdev->fd_cfg.fd_mode) {
3819         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3820                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3821                 break;
3822         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3823                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3824                 break;
3825         default:
3826                 dev_err(&hdev->pdev->dev,
3827                         "Unsupported flow director mode %d\n",
3828                         hdev->fd_cfg.fd_mode);
3829                 return -EOPNOTSUPP;
3830         }
3831
3832         hdev->fd_cfg.fd_en = true;
3833         hdev->fd_cfg.proto_support =
3834                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3835                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3836         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3837         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3838         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3839         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3840         key_cfg->outer_sipv6_word_en = 0;
3841         key_cfg->outer_dipv6_word_en = 0;
3842
3843         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3844                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3845                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3846                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3847
3848         /* If the max 400-bit key is used, ether type tuples are also supported */
3849         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3850                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3851                 key_cfg->tuple_active |=
3852                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3853         }
3854
3855         /* roce_type is used to filter out roce frames;
3856          * dst_vport is used to specify the vport the rule applies to.
3857          */
3858         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3859
3860         ret = hclge_get_fd_allocation(hdev,
3861                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3862                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3863                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3864                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3865         if (ret)
3866                 return ret;
3867
3868         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3869 }
3870
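/* Write one flow director TCAM entry at the given location. The key is split
 * across three command descriptors; sel_x selects whether the x or y half of
 * the key is written, and the entry valid bit is driven by is_add only for
 * the x half.
 */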
3871 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3872                                 int loc, u8 *key, bool is_add)
3873 {
3874         struct hclge_fd_tcam_config_1_cmd *req1;
3875         struct hclge_fd_tcam_config_2_cmd *req2;
3876         struct hclge_fd_tcam_config_3_cmd *req3;
3877         struct hclge_desc desc[3];
3878         int ret;
3879
3880         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3881         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3882         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3883         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3884         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3885
3886         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3887         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3888         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3889
3890         req1->stage = stage;
3891         req1->xy_sel = sel_x ? 1 : 0;
3892         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3893         req1->index = cpu_to_le32(loc);
3894         req1->entry_vld = sel_x ? is_add : 0;
3895
3896         if (key) {
3897                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3898                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3899                        sizeof(req2->tcam_data));
3900                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3901                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3902         }
3903
3904         ret = hclge_cmd_send(&hdev->hw, desc, 3);
3905         if (ret)
3906                 dev_err(&hdev->pdev->dev,
3907                         "config tcam key fail, ret=%d\n",
3908                         ret);
3909
3910         return ret;
3911 }
3912
3913 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3914                               struct hclge_fd_ad_data *action)
3915 {
3916         struct hclge_fd_ad_config_cmd *req;
3917         struct hclge_desc desc;
3918         u64 ad_data = 0;
3919         int ret;
3920
3921         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3922
3923         req = (struct hclge_fd_ad_config_cmd *)desc.data;
3924         req->index = cpu_to_le32(loc);
3925         req->stage = stage;
3926
3927         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3928                       action->write_rule_id_to_bd);
3929         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3930                         action->rule_id);
3931         ad_data <<= 32;
3932         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3933         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3934                       action->forward_to_direct_queue);
3935         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3936                         action->queue_id);
3937         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3938         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3939                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3940         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3941         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3942                         action->counter_id);
3943
3944         req->ad_data = cpu_to_le64(ad_data);
3945         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3946         if (ret)
3947                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3948
3949         return ret;
3950 }
3951
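/* Convert one rule tuple into its TCAM x/y representation using
 * calc_x()/calc_y(). Returns true if the tuple occupies space in the key
 * (including tuples marked unused, which are left as zero), false otherwise.
 */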
3952 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3953                                    struct hclge_fd_rule *rule)
3954 {
3955         u16 tmp_x_s, tmp_y_s;
3956         u32 tmp_x_l, tmp_y_l;
3957         int i;
3958
3959         if (rule->unused_tuple & tuple_bit)
3960                 return true;
3961
3962         switch (tuple_bit) {
3963         case 0:
3964                 return false;
3965         case BIT(INNER_DST_MAC):
3966                 for (i = 0; i < 6; i++) {
3967                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3968                                rule->tuples_mask.dst_mac[i]);
3969                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3970                                rule->tuples_mask.dst_mac[i]);
3971                 }
3972
3973                 return true;
3974         case BIT(INNER_SRC_MAC):
3975                 for (i = 0; i < 6; i++) {
3976                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3977                                rule->tuples_mask.src_mac[i]);
3978                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3979                                rule->tuples_mask.src_mac[i]);
3980                 }
3981
3982                 return true;
3983         case BIT(INNER_VLAN_TAG_FST):
3984                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3985                        rule->tuples_mask.vlan_tag1);
3986                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3987                        rule->tuples_mask.vlan_tag1);
3988                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3989                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3990
3991                 return true;
3992         case BIT(INNER_ETH_TYPE):
3993                 calc_x(tmp_x_s, rule->tuples.ether_proto,
3994                        rule->tuples_mask.ether_proto);
3995                 calc_y(tmp_y_s, rule->tuples.ether_proto,
3996                        rule->tuples_mask.ether_proto);
3997                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3998                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3999
4000                 return true;
4001         case BIT(INNER_IP_TOS):
4002                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4003                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4004
4005                 return true;
4006         case BIT(INNER_IP_PROTO):
4007                 calc_x(*key_x, rule->tuples.ip_proto,
4008                        rule->tuples_mask.ip_proto);
4009                 calc_y(*key_y, rule->tuples.ip_proto,
4010                        rule->tuples_mask.ip_proto);
4011
4012                 return true;
4013         case BIT(INNER_SRC_IP):
4014                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4015                        rule->tuples_mask.src_ip[3]);
4016                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4017                        rule->tuples_mask.src_ip[3]);
4018                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4019                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4020
4021                 return true;
4022         case BIT(INNER_DST_IP):
4023                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4024                        rule->tuples_mask.dst_ip[3]);
4025                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4026                        rule->tuples_mask.dst_ip[3]);
4027                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4028                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4029
4030                 return true;
4031         case BIT(INNER_SRC_PORT):
4032                 calc_x(tmp_x_s, rule->tuples.src_port,
4033                        rule->tuples_mask.src_port);
4034                 calc_y(tmp_y_s, rule->tuples.src_port,
4035                        rule->tuples_mask.src_port);
4036                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4037                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4038
4039                 return true;
4040         case BIT(INNER_DST_PORT):
4041                 calc_x(tmp_x_s, rule->tuples.dst_port,
4042                        rule->tuples_mask.dst_port);
4043                 calc_y(tmp_y_s, rule->tuples.dst_port,
4044                        rule->tuples_mask.dst_port);
4045                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4046                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4047
4048                 return true;
4049         default:
4050                 return false;
4051         }
4052 }
4053
4054 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4055                                  u8 vf_id, u8 network_port_id)
4056 {
4057         u32 port_number = 0;
4058
4059         if (port_type == HOST_PORT) {
4060                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4061                                 pf_id);
4062                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4063                                 vf_id);
4064                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4065         } else {
4066                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4067                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4068                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4069         }
4070
4071         return port_number;
4072 }
4073
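/* Build the meta data part of the key (packet type and destination vport),
 * convert it to x/y format and shift it so the used bits sit at the MSB end
 * of the meta data region.
 */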
4074 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4075                                        __le32 *key_x, __le32 *key_y,
4076                                        struct hclge_fd_rule *rule)
4077 {
4078         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4079         u8 cur_pos = 0, tuple_size, shift_bits;
4080         int i;
4081
4082         for (i = 0; i < MAX_META_DATA; i++) {
4083                 tuple_size = meta_data_key_info[i].key_length;
4084                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4085
4086                 switch (tuple_bit) {
4087                 case BIT(ROCE_TYPE):
4088                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4089                         cur_pos += tuple_size;
4090                         break;
4091                 case BIT(DST_VPORT):
4092                         port_number = hclge_get_port_number(HOST_PORT, 0,
4093                                                             rule->vf_id, 0);
4094                         hnae3_set_field(meta_data,
4095                                         GENMASK(cur_pos + tuple_size, cur_pos),
4096                                         cur_pos, port_number);
4097                         cur_pos += tuple_size;
4098                         break;
4099                 default:
4100                         break;
4101                 }
4102         }
4103
4104         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4105         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4106         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4107
4108         *key_x = cpu_to_le32(tmp_x << shift_bits);
4109         *key_y = cpu_to_le32(tmp_y << shift_bits);
4110 }
4111
4112 /* A complete key consists of a meta data key and a tuple key.
4113  * The meta data key is stored in the MSB region and the tuple key in the
4114  * LSB region; unused bits are filled with 0.
4115  */
4116 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4117                             struct hclge_fd_rule *rule)
4118 {
4119         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4120         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4121         u8 *cur_key_x, *cur_key_y;
4122         int i, ret, tuple_size;
4123         u8 meta_data_region;
4124
4125         memset(key_x, 0, sizeof(key_x));
4126         memset(key_y, 0, sizeof(key_y));
4127         cur_key_x = key_x;
4128         cur_key_y = key_y;
4129
4130         for (i = 0; i < MAX_TUPLE; i++) {
4131                 bool tuple_valid;
4132                 u32 check_tuple;
4133
4134                 tuple_size = tuple_key_info[i].key_length / 8;
4135                 check_tuple = key_cfg->tuple_active & BIT(i);
4136
4137                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4138                                                      cur_key_y, rule);
4139                 if (tuple_valid) {
4140                         cur_key_x += tuple_size;
4141                         cur_key_y += tuple_size;
4142                 }
4143         }
4144
4145         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4146                         MAX_META_DATA_LENGTH / 8;
4147
4148         hclge_fd_convert_meta_data(key_cfg,
4149                                    (__le32 *)(key_x + meta_data_region),
4150                                    (__le32 *)(key_y + meta_data_region),
4151                                    rule);
4152
4153         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4154                                    true);
4155         if (ret) {
4156                 dev_err(&hdev->pdev->dev,
4157                         "fd key_y config fail, loc=%d, ret=%d\n",
4158                         rule->location, ret);
4159                 return ret;
4160         }
4161
4162         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4163                                    true);
4164         if (ret)
4165                 dev_err(&hdev->pdev->dev,
4166                         "fd key_x config fail, loc=%d, ret=%d\n",
4167                         rule->location, ret);
4168         return ret;
4169 }
4170
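/* Fill in the action data for a flow director rule: either drop the packet
 * or forward it to the requested queue, and write the rule id (the rule's
 * location) back to the buffer descriptor so matched rules can be identified.
 */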
4171 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4172                                struct hclge_fd_rule *rule)
4173 {
4174         struct hclge_fd_ad_data ad_data;
4175
4176         ad_data.ad_id = rule->location;
4177
4178         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4179                 ad_data.drop_packet = true;
4180                 ad_data.forward_to_direct_queue = false;
4181                 ad_data.queue_id = 0;
4182         } else {
4183                 ad_data.drop_packet = false;
4184                 ad_data.forward_to_direct_queue = true;
4185                 ad_data.queue_id = rule->queue_id;
4186         }
4187
4188         ad_data.use_counter = false;
4189         ad_data.counter_id = 0;
4190
4191         ad_data.use_next_stage = false;
4192         ad_data.next_input_key = 0;
4193
4194         ad_data.write_rule_id_to_bd = true;
4195         ad_data.rule_id = rule->location;
4196
4197         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4198 }
4199
4200 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4201                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4202 {
4203         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4204         struct ethtool_usrip4_spec *usr_ip4_spec;
4205         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4206         struct ethtool_usrip6_spec *usr_ip6_spec;
4207         struct ethhdr *ether_spec;
4208
4209         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4210                 return -EINVAL;
4211
4212         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4213                 return -EOPNOTSUPP;
4214
4215         if ((fs->flow_type & FLOW_EXT) &&
4216             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4217                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4218                 return -EOPNOTSUPP;
4219         }
4220
4221         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4222         case SCTP_V4_FLOW:
4223         case TCP_V4_FLOW:
4224         case UDP_V4_FLOW:
4225                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4226                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4227
4228                 if (!tcp_ip4_spec->ip4src)
4229                         *unused |= BIT(INNER_SRC_IP);
4230
4231                 if (!tcp_ip4_spec->ip4dst)
4232                         *unused |= BIT(INNER_DST_IP);
4233
4234                 if (!tcp_ip4_spec->psrc)
4235                         *unused |= BIT(INNER_SRC_PORT);
4236
4237                 if (!tcp_ip4_spec->pdst)
4238                         *unused |= BIT(INNER_DST_PORT);
4239
4240                 if (!tcp_ip4_spec->tos)
4241                         *unused |= BIT(INNER_IP_TOS);
4242
4243                 break;
4244         case IP_USER_FLOW:
4245                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4246                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4247                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4248
4249                 if (!usr_ip4_spec->ip4src)
4250                         *unused |= BIT(INNER_SRC_IP);
4251
4252                 if (!usr_ip4_spec->ip4dst)
4253                         *unused |= BIT(INNER_DST_IP);
4254
4255                 if (!usr_ip4_spec->tos)
4256                         *unused |= BIT(INNER_IP_TOS);
4257
4258                 if (!usr_ip4_spec->proto)
4259                         *unused |= BIT(INNER_IP_PROTO);
4260
4261                 if (usr_ip4_spec->l4_4_bytes)
4262                         return -EOPNOTSUPP;
4263
4264                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4265                         return -EOPNOTSUPP;
4266
4267                 break;
4268         case SCTP_V6_FLOW:
4269         case TCP_V6_FLOW:
4270         case UDP_V6_FLOW:
4271                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4272                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4273                         BIT(INNER_IP_TOS);
4274
4275                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4276                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4277                         *unused |= BIT(INNER_SRC_IP);
4278
4279                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4280                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4281                         *unused |= BIT(INNER_DST_IP);
4282
4283                 if (!tcp_ip6_spec->psrc)
4284                         *unused |= BIT(INNER_SRC_PORT);
4285
4286                 if (!tcp_ip6_spec->pdst)
4287                         *unused |= BIT(INNER_DST_PORT);
4288
4289                 if (tcp_ip6_spec->tclass)
4290                         return -EOPNOTSUPP;
4291
4292                 break;
4293         case IPV6_USER_FLOW:
4294                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4295                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4296                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4297                         BIT(INNER_DST_PORT);
4298
4299                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4300                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4301                         *unused |= BIT(INNER_SRC_IP);
4302
4303                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4304                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4305                         *unused |= BIT(INNER_DST_IP);
4306
4307                 if (!usr_ip6_spec->l4_proto)
4308                         *unused |= BIT(INNER_IP_PROTO);
4309
4310                 if (usr_ip6_spec->tclass)
4311                         return -EOPNOTSUPP;
4312
4313                 if (usr_ip6_spec->l4_4_bytes)
4314                         return -EOPNOTSUPP;
4315
4316                 break;
4317         case ETHER_FLOW:
4318                 ether_spec = &fs->h_u.ether_spec;
4319                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4320                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4321                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4322
4323                 if (is_zero_ether_addr(ether_spec->h_source))
4324                         *unused |= BIT(INNER_SRC_MAC);
4325
4326                 if (is_zero_ether_addr(ether_spec->h_dest))
4327                         *unused |= BIT(INNER_DST_MAC);
4328
4329                 if (!ether_spec->h_proto)
4330                         *unused |= BIT(INNER_ETH_TYPE);
4331
4332                 break;
4333         default:
4334                 return -EOPNOTSUPP;
4335         }
4336
4337         if ((fs->flow_type & FLOW_EXT)) {
4338                 if (fs->h_ext.vlan_etype)
4339                         return -EOPNOTSUPP;
4340                 if (!fs->h_ext.vlan_tci)
4341                         *unused |= BIT(INNER_VLAN_TAG_FST);
4342
4343                 if (fs->m_ext.vlan_tci) {
4344                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4345                                 return -EINVAL;
4346                 }
4347         } else {
4348                 *unused |= BIT(INNER_VLAN_TAG_FST);
4349         }
4350
4351         if (fs->flow_type & FLOW_MAC_EXT) {
4352                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4353                         return -EOPNOTSUPP;
4354
4355                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4356                         *unused |= BIT(INNER_DST_MAC);
4357                 else
4358                         *unused &= ~(BIT(INNER_DST_MAC));
4359         }
4360
4361         return 0;
4362 }
4363
4364 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4365 {
4366         struct hclge_fd_rule *rule = NULL;
4367         struct hlist_node *node2;
4368
4369         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4370                 if (rule->location >= location)
4371                         break;
4372         }
4373
4374         return rule && rule->location == location;
4375 }
4376
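/* Maintain the software rule list, which is kept sorted by location: on
 * add, an existing rule at the same location is replaced by new_rule; on
 * delete, the matching rule is unlinked and freed, or -EINVAL is returned
 * if no such rule exists.
 */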
4377 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4378                                      struct hclge_fd_rule *new_rule,
4379                                      u16 location,
4380                                      bool is_add)
4381 {
4382         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4383         struct hlist_node *node2;
4384
4385         if (is_add && !new_rule)
4386                 return -EINVAL;
4387
4388         hlist_for_each_entry_safe(rule, node2,
4389                                   &hdev->fd_rule_list, rule_node) {
4390                 if (rule->location >= location)
4391                         break;
4392                 parent = rule;
4393         }
4394
4395         if (rule && rule->location == location) {
4396                 hlist_del(&rule->rule_node);
4397                 kfree(rule);
4398                 hdev->hclge_fd_rule_num--;
4399
4400                 if (!is_add)
4401                         return 0;
4402
4403         } else if (!is_add) {
4404                 dev_err(&hdev->pdev->dev,
4405                         "delete fail, rule %d does not exist\n",
4406                         location);
4407                 return -EINVAL;
4408         }
4409
4410         INIT_HLIST_NODE(&new_rule->rule_node);
4411
4412         if (parent)
4413                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4414         else
4415                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4416
4417         hdev->hclge_fd_rule_num++;
4418
4419         return 0;
4420 }
4421
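/* Convert the big-endian ethtool spec into host-order rule tuples. IPv4
 * addresses occupy the last word (index 3) of the 4-word arrays shared
 * with IPv6, and for SCTP/TCP/UDP flow types the L4 protocol is filled in
 * from the flow type itself.
 */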
4422 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4423                               struct ethtool_rx_flow_spec *fs,
4424                               struct hclge_fd_rule *rule)
4425 {
4426         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4427
4428         switch (flow_type) {
4429         case SCTP_V4_FLOW:
4430         case TCP_V4_FLOW:
4431         case UDP_V4_FLOW:
4432                 rule->tuples.src_ip[3] =
4433                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4434                 rule->tuples_mask.src_ip[3] =
4435                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4436
4437                 rule->tuples.dst_ip[3] =
4438                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4439                 rule->tuples_mask.dst_ip[3] =
4440                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4441
4442                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4443                 rule->tuples_mask.src_port =
4444                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4445
4446                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4447                 rule->tuples_mask.dst_port =
4448                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4449
4450                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4451                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4452
4453                 rule->tuples.ether_proto = ETH_P_IP;
4454                 rule->tuples_mask.ether_proto = 0xFFFF;
4455
4456                 break;
4457         case IP_USER_FLOW:
4458                 rule->tuples.src_ip[3] =
4459                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4460                 rule->tuples_mask.src_ip[3] =
4461                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4462
4463                 rule->tuples.dst_ip[3] =
4464                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4465                 rule->tuples_mask.dst_ip[3] =
4466                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4467
4468                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4469                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4470
4471                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4472                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4473
4474                 rule->tuples.ether_proto = ETH_P_IP;
4475                 rule->tuples_mask.ether_proto = 0xFFFF;
4476
4477                 break;
4478         case SCTP_V6_FLOW:
4479         case TCP_V6_FLOW:
4480         case UDP_V6_FLOW:
4481                 be32_to_cpu_array(rule->tuples.src_ip,
4482                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4483                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4484                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4485
4486                 be32_to_cpu_array(rule->tuples.dst_ip,
4487                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4488                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4489                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4490
4491                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4492                 rule->tuples_mask.src_port =
4493                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4494
4495                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4496                 rule->tuples_mask.dst_port =
4497                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4498
4499                 rule->tuples.ether_proto = ETH_P_IPV6;
4500                 rule->tuples_mask.ether_proto = 0xFFFF;
4501
4502                 break;
4503         case IPV6_USER_FLOW:
4504                 be32_to_cpu_array(rule->tuples.src_ip,
4505                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4506                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4507                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4508
4509                 be32_to_cpu_array(rule->tuples.dst_ip,
4510                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4511                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4512                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4513
4514                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4515                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4516
4517                 rule->tuples.ether_proto = ETH_P_IPV6;
4518                 rule->tuples_mask.ether_proto = 0xFFFF;
4519
4520                 break;
4521         case ETHER_FLOW:
4522                 ether_addr_copy(rule->tuples.src_mac,
4523                                 fs->h_u.ether_spec.h_source);
4524                 ether_addr_copy(rule->tuples_mask.src_mac,
4525                                 fs->m_u.ether_spec.h_source);
4526
4527                 ether_addr_copy(rule->tuples.dst_mac,
4528                                 fs->h_u.ether_spec.h_dest);
4529                 ether_addr_copy(rule->tuples_mask.dst_mac,
4530                                 fs->m_u.ether_spec.h_dest);
4531
4532                 rule->tuples.ether_proto =
4533                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4534                 rule->tuples_mask.ether_proto =
4535                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4536
4537                 break;
4538         default:
4539                 return -EOPNOTSUPP;
4540         }
4541
4542         switch (flow_type) {
4543         case SCTP_V4_FLOW:
4544         case SCTP_V6_FLOW:
4545                 rule->tuples.ip_proto = IPPROTO_SCTP;
4546                 rule->tuples_mask.ip_proto = 0xFF;
4547                 break;
4548         case TCP_V4_FLOW:
4549         case TCP_V6_FLOW:
4550                 rule->tuples.ip_proto = IPPROTO_TCP;
4551                 rule->tuples_mask.ip_proto = 0xFF;
4552                 break;
4553         case UDP_V4_FLOW:
4554         case UDP_V6_FLOW:
4555                 rule->tuples.ip_proto = IPPROTO_UDP;
4556                 rule->tuples_mask.ip_proto = 0xFF;
4557                 break;
4558         default:
4559                 break;
4560         }
4561
4562         if ((fs->flow_type & FLOW_EXT)) {
4563                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4564                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4565         }
4566
4567         if (fs->flow_type & FLOW_MAC_EXT) {
4568                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4569                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4570         }
4571
4572         return 0;
4573 }
4574
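/* Entry point for flow director rule insertion via ethtool -N/-U, e.g.
 * (the interface name below is only an example):
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 5 loc 1
 * A ring_cookie of RX_CLS_FLOW_DISC selects the drop action; otherwise the
 * queue index (and optional VF) encoded in the cookie is validated and
 * used as the forwarding target.
 */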
4575 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4576                               struct ethtool_rxnfc *cmd)
4577 {
4578         struct hclge_vport *vport = hclge_get_vport(handle);
4579         struct hclge_dev *hdev = vport->back;
4580         u16 dst_vport_id = 0, q_index = 0;
4581         struct ethtool_rx_flow_spec *fs;
4582         struct hclge_fd_rule *rule;
4583         u32 unused = 0;
4584         u8 action;
4585         int ret;
4586
4587         if (!hnae3_dev_fd_supported(hdev))
4588                 return -EOPNOTSUPP;
4589
4590         if (!hdev->fd_cfg.fd_en) {
4591                 dev_warn(&hdev->pdev->dev,
4592                          "Please enable flow director first\n");
4593                 return -EOPNOTSUPP;
4594         }
4595
4596         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4597
4598         ret = hclge_fd_check_spec(hdev, fs, &unused);
4599         if (ret) {
4600                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4601                 return ret;
4602         }
4603
4604         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4605                 action = HCLGE_FD_ACTION_DROP_PACKET;
4606         } else {
4607                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4608                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4609                 u16 tqps;
4610
4611                 if (vf > hdev->num_req_vfs) {
4612                         dev_err(&hdev->pdev->dev,
4613                                 "Error: vf id (%d) > max vf num (%d)\n",
4614                                 vf, hdev->num_req_vfs);
4615                         return -EINVAL;
4616                 }
4617
4618                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4619                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4620
4621                 if (ring >= tqps) {
4622                         dev_err(&hdev->pdev->dev,
4623                                 "Error: queue id (%d) > max tqp num (%d)\n",
4624                                 ring, tqps - 1);
4625                         return -EINVAL;
4626                 }
4627
4628                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4629                 q_index = ring;
4630         }
4631
4632         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4633         if (!rule)
4634                 return -ENOMEM;
4635
4636         ret = hclge_fd_get_tuple(hdev, fs, rule);
4637         if (ret)
4638                 goto free_rule;
4639
4640         rule->flow_type = fs->flow_type;
4641
4642         rule->location = fs->location;
4643         rule->unused_tuple = unused;
4644         rule->vf_id = dst_vport_id;
4645         rule->queue_id = q_index;
4646         rule->action = action;
4647
4648         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4649         if (ret)
4650                 goto free_rule;
4651
4652         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4653         if (ret)
4654                 goto free_rule;
4655
4656         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4657         if (ret)
4658                 goto free_rule;
4659
4660         return ret;
4661
4662 free_rule:
4663         kfree(rule);
4664         return ret;
4665 }
4666
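/* Remove a single rule, e.g. "ethtool -N eth0 delete 1" (interface name
 * is only an example): the TCAM entry is cleared and the rule is dropped
 * from the software list.
 */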
4667 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4668                               struct ethtool_rxnfc *cmd)
4669 {
4670         struct hclge_vport *vport = hclge_get_vport(handle);
4671         struct hclge_dev *hdev = vport->back;
4672         struct ethtool_rx_flow_spec *fs;
4673         int ret;
4674
4675         if (!hnae3_dev_fd_supported(hdev))
4676                 return -EOPNOTSUPP;
4677
4678         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4679
4680         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4681                 return -EINVAL;
4682
4683         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4684                 dev_err(&hdev->pdev->dev,
4685                         "Delete fail, rule %d does not exist\n",
4686                         fs->location);
4687                 return -ENOENT;
4688         }
4689
4690         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4691                                    fs->location, NULL, false);
4692         if (ret)
4693                 return ret;
4694
4695         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4696                                          false);
4697 }
4698
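/* Clear every flow director rule from hardware. The software list is only
 * emptied when clear_list is true; otherwise the rules are kept so they
 * can be replayed later by hclge_restore_fd_entries().
 */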
4699 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4700                                      bool clear_list)
4701 {
4702         struct hclge_vport *vport = hclge_get_vport(handle);
4703         struct hclge_dev *hdev = vport->back;
4704         struct hclge_fd_rule *rule;
4705         struct hlist_node *node;
4706
4707         if (!hnae3_dev_fd_supported(hdev))
4708                 return;
4709
4710         if (clear_list) {
4711                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4712                                           rule_node) {
4713                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4714                                              rule->location, NULL, false);
4715                         hlist_del(&rule->rule_node);
4716                         kfree(rule);
4717                         hdev->hclge_fd_rule_num--;
4718                 }
4719         } else {
4720                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4721                                           rule_node)
4722                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4723                                              rule->location, NULL, false);
4724         }
4725 }
4726
4727 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4728 {
4729         struct hclge_vport *vport = hclge_get_vport(handle);
4730         struct hclge_dev *hdev = vport->back;
4731         struct hclge_fd_rule *rule;
4732         struct hlist_node *node;
4733         int ret;
4734
4735         /* Return ok here, because reset error handling will check this
4736          * return value. If an error is returned here, the reset process
4737          * will fail.
4738          */
4739         if (!hnae3_dev_fd_supported(hdev))
4740                 return 0;
4741
4742         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4743                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4744                 if (!ret)
4745                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4746
4747                 if (ret) {
4748                         dev_warn(&hdev->pdev->dev,
4749                                  "Restore rule %d failed, remove it\n",
4750                                  rule->location);
4751                         hlist_del(&rule->rule_node);
4752                         kfree(rule);
4753                         hdev->hclge_fd_rule_num--;
4754                 }
4755         }
4756         return 0;
4757 }
4758
4759 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4760                                  struct ethtool_rxnfc *cmd)
4761 {
4762         struct hclge_vport *vport = hclge_get_vport(handle);
4763         struct hclge_dev *hdev = vport->back;
4764
4765         if (!hnae3_dev_fd_supported(hdev))
4766                 return -EOPNOTSUPP;
4767
4768         cmd->rule_cnt = hdev->hclge_fd_rule_num;
4769         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4770
4771         return 0;
4772 }
4773
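/* Report one rule back to ethtool: stored tuples are converted back to the
 * big-endian spec format, and tuples flagged in unused_tuple are reported
 * with an all-zero mask.
 */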
4774 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4775                                   struct ethtool_rxnfc *cmd)
4776 {
4777         struct hclge_vport *vport = hclge_get_vport(handle);
4778         struct hclge_fd_rule *rule = NULL;
4779         struct hclge_dev *hdev = vport->back;
4780         struct ethtool_rx_flow_spec *fs;
4781         struct hlist_node *node2;
4782
4783         if (!hnae3_dev_fd_supported(hdev))
4784                 return -EOPNOTSUPP;
4785
4786         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4787
4788         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4789                 if (rule->location >= fs->location)
4790                         break;
4791         }
4792
4793         if (!rule || fs->location != rule->location)
4794                 return -ENOENT;
4795
4796         fs->flow_type = rule->flow_type;
4797         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4798         case SCTP_V4_FLOW:
4799         case TCP_V4_FLOW:
4800         case UDP_V4_FLOW:
4801                 fs->h_u.tcp_ip4_spec.ip4src =
4802                                 cpu_to_be32(rule->tuples.src_ip[3]);
4803                 fs->m_u.tcp_ip4_spec.ip4src =
4804                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4805                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4806
4807                 fs->h_u.tcp_ip4_spec.ip4dst =
4808                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4809                 fs->m_u.tcp_ip4_spec.ip4dst =
4810                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4811                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4812
4813                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4814                 fs->m_u.tcp_ip4_spec.psrc =
4815                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4816                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
4817
4818                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4819                 fs->m_u.tcp_ip4_spec.pdst =
4820                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4821                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4822
4823                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4824                 fs->m_u.tcp_ip4_spec.tos =
4825                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4826                                 0 : rule->tuples_mask.ip_tos;
4827
4828                 break;
4829         case IP_USER_FLOW:
4830                 fs->h_u.usr_ip4_spec.ip4src =
4831                                 cpu_to_be32(rule->tuples.src_ip[3]);
4832                 fs->m_u.usr_ip4_spec.ip4src =
4833                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4834                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4835
4836                 fs->h_u.usr_ip4_spec.ip4dst =
4837                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4838                 fs->m_u.usr_ip4_spec.ip4dst =
4839                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4840                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4841
4842                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4843                 fs->m_u.usr_ip4_spec.tos =
4844                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4845                                 0 : rule->tuples_mask.ip_tos;
4846
4847                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4848                 fs->m_u.usr_ip4_spec.proto =
4849                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4850                                 0 : rule->tuples_mask.ip_proto;
4851
4852                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4853
4854                 break;
4855         case SCTP_V6_FLOW:
4856         case TCP_V6_FLOW:
4857         case UDP_V6_FLOW:
4858                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4859                                   rule->tuples.src_ip, 4);
4860                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4861                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4862                 else
4863                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4864                                           rule->tuples_mask.src_ip, 4);
4865
4866                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4867                                   rule->tuples.dst_ip, 4);
4868                 if (rule->unused_tuple & BIT(INNER_DST_IP))
4869                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4870                 else
4871                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4872                                           rule->tuples_mask.dst_ip, 4);
4873
4874                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4875                 fs->m_u.tcp_ip6_spec.psrc =
4876                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4877                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
4878
4879                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4880                 fs->m_u.tcp_ip6_spec.pdst =
4881                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4882                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4883
4884                 break;
4885         case IPV6_USER_FLOW:
4886                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4887                                   rule->tuples.src_ip, 4);
4888                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4889                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4890                 else
4891                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4892                                           rule->tuples_mask.src_ip, 4);
4893
4894                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4895                                   rule->tuples.dst_ip, 4);
4896                 if (rule->unused_tuple & BIT(INNER_DST_IP))
4897                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4898                 else
4899                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4900                                           rule->tuples_mask.dst_ip, 4);
4901
4902                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4903                 fs->m_u.usr_ip6_spec.l4_proto =
4904                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4905                                 0 : rule->tuples_mask.ip_proto;
4906
4907                 break;
4908         case ETHER_FLOW:
4909                 ether_addr_copy(fs->h_u.ether_spec.h_source,
4910                                 rule->tuples.src_mac);
4911                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4912                         eth_zero_addr(fs->m_u.ether_spec.h_source);
4913                 else
4914                         ether_addr_copy(fs->m_u.ether_spec.h_source,
4915                                         rule->tuples_mask.src_mac);
4916
4917                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
4918                                 rule->tuples.dst_mac);
4919                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4920                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
4921                 else
4922                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
4923                                         rule->tuples_mask.dst_mac);
4924
4925                 fs->h_u.ether_spec.h_proto =
4926                                 cpu_to_be16(rule->tuples.ether_proto);
4927                 fs->m_u.ether_spec.h_proto =
4928                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4929                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4930
4931                 break;
4932         default:
4933                 return -EOPNOTSUPP;
4934         }
4935
4936         if (fs->flow_type & FLOW_EXT) {
4937                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4938                 fs->m_ext.vlan_tci =
4939                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4940                                 cpu_to_be16(VLAN_VID_MASK) :
4941                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
4942         }
4943
4944         if (fs->flow_type & FLOW_MAC_EXT) {
4945                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4946                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4947                         eth_zero_addr(fs->m_ext.h_dest);
4948                 else
4949                         ether_addr_copy(fs->m_ext.h_dest,
4950                                         rule->tuples_mask.dst_mac);
4951         }
4952
4953         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4954                 fs->ring_cookie = RX_CLS_FLOW_DISC;
4955         } else {
4956                 u64 vf_id;
4957
4958                 fs->ring_cookie = rule->queue_id;
4959                 vf_id = rule->vf_id;
4960                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4961                 fs->ring_cookie |= vf_id;
4962         }
4963
4964         return 0;
4965 }
4966
4967 static int hclge_get_all_rules(struct hnae3_handle *handle,
4968                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
4969 {
4970         struct hclge_vport *vport = hclge_get_vport(handle);
4971         struct hclge_dev *hdev = vport->back;
4972         struct hclge_fd_rule *rule;
4973         struct hlist_node *node2;
4974         int cnt = 0;
4975
4976         if (!hnae3_dev_fd_supported(hdev))
4977                 return -EOPNOTSUPP;
4978
4979         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4980
4981         hlist_for_each_entry_safe(rule, node2,
4982                                   &hdev->fd_rule_list, rule_node) {
4983                 if (cnt == cmd->rule_cnt)
4984                         return -EMSGSIZE;
4985
4986                 rule_locs[cnt] = rule->location;
4987                 cnt++;
4988         }
4989
4990         cmd->rule_cnt = cnt;
4991
4992         return 0;
4993 }
4994
4995 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4996 {
4997         struct hclge_vport *vport = hclge_get_vport(handle);
4998         struct hclge_dev *hdev = vport->back;
4999
5000         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5001                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5002 }
5003
5004 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5005 {
5006         struct hclge_vport *vport = hclge_get_vport(handle);
5007         struct hclge_dev *hdev = vport->back;
5008
5009         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5010 }
5011
5012 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5013 {
5014         struct hclge_vport *vport = hclge_get_vport(handle);
5015         struct hclge_dev *hdev = vport->back;
5016
5017         return hdev->reset_count;
5018 }
5019
5020 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5021 {
5022         struct hclge_vport *vport = hclge_get_vport(handle);
5023         struct hclge_dev *hdev = vport->back;
5024
5025         hdev->fd_cfg.fd_en = enable;
5026         if (!enable)
5027                 hclge_del_all_fd_entries(handle, false);
5028         else
5029                 hclge_restore_fd_entries(handle);
5030 }
5031
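/* Enable or disable the MAC datapath: TX/RX enable, padding, FCS
 * insertion/stripping and oversize truncation are switched together with
 * a single HCLGE_OPC_CONFIG_MAC_MODE command.
 */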
5032 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5033 {
5034         struct hclge_desc desc;
5035         struct hclge_config_mac_mode_cmd *req =
5036                 (struct hclge_config_mac_mode_cmd *)desc.data;
5037         u32 loop_en = 0;
5038         int ret;
5039
5040         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5041         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5042         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5043         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5044         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5045         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5046         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5047         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5048         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5049         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5050         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5051         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5052         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5053         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5054         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5055         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5056
5057         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5058         if (ret)
5059                 dev_err(&hdev->pdev->dev,
5060                         "mac enable fail, ret =%d.\n", ret);
5061 }
5062
5063 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5064 {
5065         struct hclge_config_mac_mode_cmd *req;
5066         struct hclge_desc desc;
5067         u32 loop_en;
5068         int ret;
5069
5070         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5071         /* 1 Read out the MAC mode config first */
5072         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5073         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5074         if (ret) {
5075                 dev_err(&hdev->pdev->dev,
5076                         "mac loopback get fail, ret =%d.\n", ret);
5077                 return ret;
5078         }
5079
5080         /* 2 Then setup the loopback flag */
5081         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5082         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5083         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5084         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5085
5086         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5087
5088         /* 3 Config mac work mode with loopback flag
5089          * and its original configuration parameters
5090          */
5091         hclge_cmd_reuse_desc(&desc, false);
5092         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5093         if (ret)
5094                 dev_err(&hdev->pdev->dev,
5095                         "mac loopback set fail, ret =%d.\n", ret);
5096         return ret;
5097 }
5098
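/* Configure SerDes inner loopback (serial or parallel) through the command
 * queue, then poll the result bit. With the constants below, the wait is
 * bounded by roughly 100 * 10 ms = 1 s before giving up with -EBUSY.
 */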
5099 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5100                                      enum hnae3_loop loop_mode)
5101 {
5102 #define HCLGE_SERDES_RETRY_MS   10
5103 #define HCLGE_SERDES_RETRY_NUM  100
5104         struct hclge_serdes_lb_cmd *req;
5105         struct hclge_desc desc;
5106         int ret, i = 0;
5107         u8 loop_mode_b;
5108
5109         req = (struct hclge_serdes_lb_cmd *)desc.data;
5110         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5111
5112         switch (loop_mode) {
5113         case HNAE3_LOOP_SERIAL_SERDES:
5114                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5115                 break;
5116         case HNAE3_LOOP_PARALLEL_SERDES:
5117                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5118                 break;
5119         default:
5120                 dev_err(&hdev->pdev->dev,
5121                         "unsupported serdes loopback mode %d\n", loop_mode);
5122                 return -ENOTSUPP;
5123         }
5124
5125         if (en) {
5126                 req->enable = loop_mode_b;
5127                 req->mask = loop_mode_b;
5128         } else {
5129                 req->mask = loop_mode_b;
5130         }
5131
5132         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5133         if (ret) {
5134                 dev_err(&hdev->pdev->dev,
5135                         "serdes loopback set fail, ret = %d\n", ret);
5136                 return ret;
5137         }
5138
5139         do {
5140                 msleep(HCLGE_SERDES_RETRY_MS);
5141                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5142                                            true);
5143                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5144                 if (ret) {
5145                         dev_err(&hdev->pdev->dev,
5146                                 "serdes loopback get fail, ret = %d\n",
5147                         return ret;
5148                 }
5149         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5150                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5151
5152         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5153                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5154                 return -EBUSY;
5155         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5156                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5157                 return -EIO;
5158         }
5159
5160         hclge_cfg_mac_mode(hdev, en);
5161         return 0;
5162 }
5163
5164 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5165                             int stream_id, bool enable)
5166 {
5167         struct hclge_desc desc;
5168         struct hclge_cfg_com_tqp_queue_cmd *req =
5169                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5170         int ret;
5171
5172         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5173         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5174         req->stream_id = cpu_to_le16(stream_id);
5175         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5176
5177         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5178         if (ret)
5179                 dev_err(&hdev->pdev->dev,
5180                         "Tqp enable fail, status =%d.\n", ret);
5181         return ret;
5182 }
5183
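/* Dispatch the requested loopback mode (application-level MAC loopback or
 * SerDes loopback) and switch the vport's TQPs accordingly. This is
 * typically exercised by the ethtool selftest path, e.g.
 * "ethtool -t eth0 offline" (interface name is only an example).
 */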
5184 static int hclge_set_loopback(struct hnae3_handle *handle,
5185                               enum hnae3_loop loop_mode, bool en)
5186 {
5187         struct hclge_vport *vport = hclge_get_vport(handle);
5188         struct hclge_dev *hdev = vport->back;
5189         int i, ret;
5190
5191         switch (loop_mode) {
5192         case HNAE3_LOOP_APP:
5193                 ret = hclge_set_app_loopback(hdev, en);
5194                 break;
5195         case HNAE3_LOOP_SERIAL_SERDES:
5196         case HNAE3_LOOP_PARALLEL_SERDES:
5197                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5198                 break;
5199         default:
5200                 ret = -ENOTSUPP;
5201                 dev_err(&hdev->pdev->dev,
5202                         "loop_mode %d is not supported\n", loop_mode);
5203                 break;
5204         }
5205
             if (ret)
                     return ret;

5206         for (i = 0; i < vport->alloc_tqps; i++) {
5207                 ret = hclge_tqp_enable(hdev, i, 0, en);
5208                 if (ret)
5209                         return ret;
5210         }
5211
5212         return 0;
5213 }
5214
5215 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5216 {
5217         struct hclge_vport *vport = hclge_get_vport(handle);
5218         struct hnae3_queue *queue;
5219         struct hclge_tqp *tqp;
5220         int i;
5221
5222         for (i = 0; i < vport->alloc_tqps; i++) {
5223                 queue = handle->kinfo.tqp[i];
5224                 tqp = container_of(queue, struct hclge_tqp, q);
5225                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5226         }
5227 }
5228
5229 static int hclge_ae_start(struct hnae3_handle *handle)
5230 {
5231         struct hclge_vport *vport = hclge_get_vport(handle);
5232         struct hclge_dev *hdev = vport->back;
5233
5234         /* mac enable */
5235         hclge_cfg_mac_mode(hdev, true);
5236         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5237         mod_timer(&hdev->service_timer, jiffies + HZ);
5238         hdev->hw.mac.link = 0;
5239
5240         /* reset tqp stats */
5241         hclge_reset_tqp_stats(handle);
5242
5243         hclge_mac_start_phy(hdev);
5244
5245         return 0;
5246 }
5247
5248 static void hclge_ae_stop(struct hnae3_handle *handle)
5249 {
5250         struct hclge_vport *vport = hclge_get_vport(handle);
5251         struct hclge_dev *hdev = vport->back;
5252
5253         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5254
5255         del_timer_sync(&hdev->service_timer);
5256         cancel_work_sync(&hdev->service_task);
5257         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5258
5259         /* If it is not a PF reset, the firmware will disable the MAC,
5260          * so we only need to stop the phy here.
5261          */
5262         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5263             hdev->reset_type != HNAE3_FUNC_RESET) {
5264                 hclge_mac_stop_phy(hdev);
5265                 return;
5266         }
5267
5268         /* Mac disable */
5269         hclge_cfg_mac_mode(hdev, false);
5270
5271         hclge_mac_stop_phy(hdev);
5272
5273         /* reset tqp stats */
5274         hclge_reset_tqp_stats(handle);
5275         del_timer_sync(&hdev->service_timer);
5276         cancel_work_sync(&hdev->service_task);
5277         hclge_update_link_status(hdev);
5278 }
5279
5280 int hclge_vport_start(struct hclge_vport *vport)
5281 {
5282         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5283         vport->last_active_jiffies = jiffies;
5284         return 0;
5285 }
5286
5287 void hclge_vport_stop(struct hclge_vport *vport)
5288 {
5289         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5290 }
5291
5292 static int hclge_client_start(struct hnae3_handle *handle)
5293 {
5294         struct hclge_vport *vport = hclge_get_vport(handle);
5295
5296         return hclge_vport_start(vport);
5297 }
5298
5299 static void hclge_client_stop(struct hnae3_handle *handle)
5300 {
5301         struct hclge_vport *vport = hclge_get_vport(handle);
5302
5303         hclge_vport_stop(vport);
5304 }
5305
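/* Decode a MAC/VLAN table command response: for ADD, resp_code 2 and 3
 * mean the unicast or multicast table overflowed (-ENOSPC); for REMOVE and
 * LKUP, resp_code 1 means no matching entry was found (-ENOENT).
 */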
5306 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5307                                          u16 cmdq_resp, u8  resp_code,
5308                                          enum hclge_mac_vlan_tbl_opcode op)
5309 {
5310         struct hclge_dev *hdev = vport->back;
5311         int return_status = -EIO;
5312
5313         if (cmdq_resp) {
5314                 dev_err(&hdev->pdev->dev,
5315                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
5316                         cmdq_resp);
5317                 return -EIO;
5318         }
5319
5320         if (op == HCLGE_MAC_VLAN_ADD) {
5321                 if ((!resp_code) || (resp_code == 1)) {
5322                         return_status = 0;
5323                 } else if (resp_code == 2) {
5324                         return_status = -ENOSPC;
5325                         dev_err(&hdev->pdev->dev,
5326                                 "add mac addr failed for uc_overflow.\n");
5327                 } else if (resp_code == 3) {
5328                         return_status = -ENOSPC;
5329                         dev_err(&hdev->pdev->dev,
5330                                 "add mac addr failed for mc_overflow.\n");
5331                 } else {
5332                         dev_err(&hdev->pdev->dev,
5333                                 "add mac addr failed for undefined, code=%d.\n",
5334                                 resp_code);
5335                 }
5336         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5337                 if (!resp_code) {
5338                         return_status = 0;
5339                 } else if (resp_code == 1) {
5340                         return_status = -ENOENT;
5341                         dev_dbg(&hdev->pdev->dev,
5342                                 "remove mac addr failed for miss.\n");
5343                 } else {
5344                         dev_err(&hdev->pdev->dev,
5345                                 "remove mac addr failed for undefined, code=%d.\n",
5346                                 resp_code);
5347                 }
5348         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5349                 if (!resp_code) {
5350                         return_status = 0;
5351                 } else if (resp_code == 1) {
5352                         return_status = -ENOENT;
5353                         dev_dbg(&hdev->pdev->dev,
5354                                 "lookup mac addr failed for miss.\n");
5355                 } else {
5356                         dev_err(&hdev->pdev->dev,
5357                                 "lookup mac addr failed for undefined, code=%d.\n",
5358                                 resp_code);
5359                 }
5360         } else {
5361                 return_status = -EINVAL;
5362                 dev_err(&hdev->pdev->dev,
5363                         "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n",
5364                         op);
5365         }
5366
5367         return return_status;
5368 }
5369
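/* Set or clear one function's bit in a MAC/VLAN entry's VF bitmap. The
 * bitmap spans two descriptors: ids 0-191 live in desc[1].data[] and ids
 * 192-255 in desc[2].data[], at word vfid / 32, bit vfid % 32.
 */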
5370 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5371 {
5372         int word_num;
5373         int bit_num;
5374
5375         if (vfid > 255 || vfid < 0)
5376                 return -EIO;
5377
5378         if (vfid >= 0 && vfid <= 191) {
5379                 word_num = vfid / 32;
5380                 bit_num  = vfid % 32;
5381                 if (clr)
5382                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5383                 else
5384                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5385         } else {
5386                 word_num = (vfid - 192) / 32;
5387                 bit_num  = vfid % 32;
5388                 if (clr)
5389                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5390                 else
5391                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5392         }
5393
5394         return 0;
5395 }
5396
5397 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5398 {
5399 #define HCLGE_DESC_NUMBER 3
5400 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5401         int i, j;
5402
5403         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5404                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5405                         if (desc[i].data[j])
5406                                 return false;
5407
5408         return true;
5409 }
5410
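/* Pack a 6-byte MAC address into the table entry layout: bytes 0-3 go into
 * the little-endian mac_addr_hi32 field and bytes 4-5 into mac_addr_lo16.
 */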
5411 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5412                                    const u8 *addr)
5413 {
5414         const unsigned char *mac_addr = addr;
5415         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5416                        (mac_addr[0]) | (mac_addr[1] << 8);
5417         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5418
5419         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5420         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5421 }
5422
5423 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5424                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5425 {
5426         struct hclge_dev *hdev = vport->back;
5427         struct hclge_desc desc;
5428         u8 resp_code;
5429         u16 retval;
5430         int ret;
5431
5432         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5433
5434         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5435
5436         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5437         if (ret) {
5438                 dev_err(&hdev->pdev->dev,
5439                         "del mac addr failed for cmd_send, ret =%d.\n",
5440                         ret);
5441                 return ret;
5442         }
5443         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5444         retval = le16_to_cpu(desc.retval);
5445
5446         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5447                                              HCLGE_MAC_VLAN_REMOVE);
5448 }
5449
5450 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5451                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5452                                      struct hclge_desc *desc,
5453                                      bool is_mc)
5454 {
5455         struct hclge_dev *hdev = vport->back;
5456         u8 resp_code;
5457         u16 retval;
5458         int ret;
5459
5460         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5461         if (is_mc) {
5462                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5463                 memcpy(desc[0].data,
5464                        req,
5465                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5466                 hclge_cmd_setup_basic_desc(&desc[1],
5467                                            HCLGE_OPC_MAC_VLAN_ADD,
5468                                            true);
5469                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5470                 hclge_cmd_setup_basic_desc(&desc[2],
5471                                            HCLGE_OPC_MAC_VLAN_ADD,
5472                                            true);
5473                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5474         } else {
5475                 memcpy(desc[0].data,
5476                        req,
5477                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5478                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5479         }
5480         if (ret) {
5481                 dev_err(&hdev->pdev->dev,
5482                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5483                         ret);
5484                 return ret;
5485         }
5486         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5487         retval = le16_to_cpu(desc[0].retval);
5488
5489         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5490                                              HCLGE_MAC_VLAN_LKUP);
5491 }
5492
5493 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5494                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5495                                   struct hclge_desc *mc_desc)
5496 {
5497         struct hclge_dev *hdev = vport->back;
5498         int cfg_status;
5499         u8 resp_code;
5500         u16 retval;
5501         int ret;
5502
5503         if (!mc_desc) {
5504                 struct hclge_desc desc;
5505
5506                 hclge_cmd_setup_basic_desc(&desc,
5507                                            HCLGE_OPC_MAC_VLAN_ADD,
5508                                            false);
5509                 memcpy(desc.data, req,
5510                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5511                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5512                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5513                 retval = le16_to_cpu(desc.retval);
5514
5515                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5516                                                            resp_code,
5517                                                            HCLGE_MAC_VLAN_ADD);
5518         } else {
5519                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5520                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5521                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5522                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5523                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5524                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5525                 memcpy(mc_desc[0].data, req,
5526                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5527                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5528                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5529                 retval = le16_to_cpu(mc_desc[0].retval);
5530
5531                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5532                                                            resp_code,
5533                                                            HCLGE_MAC_VLAN_ADD);
5534         }
5535
5536         if (ret) {
5537                 dev_err(&hdev->pdev->dev,
5538                         "add mac addr failed for cmd_send, ret =%d.\n",
5539                         ret);
5540                 return ret;
5541         }
5542
5543         return cfg_status;
5544 }
5545
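/* Request unicast MAC VLAN (UMV) table space from firmware and split it:
 * a per-function private quota of max_umv_size / (num_req_vfs + 2) entries
 * is derived, and the remainder forms the shared pool tracked in
 * share_umv_size.
 */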
5546 static int hclge_init_umv_space(struct hclge_dev *hdev)
5547 {
5548         u16 allocated_size = 0;
5549         int ret;
5550
5551         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5552                                   true);
5553         if (ret)
5554                 return ret;
5555
5556         if (allocated_size < hdev->wanted_umv_size)
5557                 dev_warn(&hdev->pdev->dev,
5558                          "Alloc umv space failed, want %d, get %d\n",
5559                          hdev->wanted_umv_size, allocated_size);
5560
5561         mutex_init(&hdev->umv_mutex);
5562         hdev->max_umv_size = allocated_size;
5563         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5564         hdev->share_umv_size = hdev->priv_umv_size +
5565                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5566
5567         return 0;
5568 }
5569
5570 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5571 {
5572         int ret;
5573
5574         if (hdev->max_umv_size > 0) {
5575                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5576                                           false);
5577                 if (ret)
5578                         return ret;
5579                 hdev->max_umv_size = 0;
5580         }
5581         mutex_destroy(&hdev->umv_mutex);
5582
5583         return 0;
5584 }
5585
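/* Allocate or free UMV table space through the MAC_VLAN_ALLOCATE command;
 * when allocating, the size actually granted by firmware is returned via
 * allocated_size.
 */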
5586 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5587                                u16 *allocated_size, bool is_alloc)
5588 {
5589         struct hclge_umv_spc_alc_cmd *req;
5590         struct hclge_desc desc;
5591         int ret;
5592
5593         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5594         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5595         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5596         req->space_size = cpu_to_le32(space_size);
5597
5598         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5599         if (ret) {
5600                 dev_err(&hdev->pdev->dev,
5601                         "%s umv space failed for cmd_send, ret =%d\n",
5602                         is_alloc ? "allocate" : "free", ret);
5603                 return ret;
5604         }
5605
5606         if (is_alloc && allocated_size)
5607                 *allocated_size = le32_to_cpu(desc.data[1]);
5608
5609         return 0;
5610 }
5611
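/* Clear every vport's UMV usage counter and restore the shared pool to
 * its initial size.
 */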
5612 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5613 {
5614         struct hclge_vport *vport;
5615         int i;
5616
5617         for (i = 0; i < hdev->num_alloc_vport; i++) {
5618                 vport = &hdev->vport[i];
5619                 vport->used_umv_num = 0;
5620         }
5621
5622         mutex_lock(&hdev->umv_mutex);
5623         hdev->share_umv_size = hdev->priv_umv_size +
5624                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5625         mutex_unlock(&hdev->umv_mutex);
5626 }
5627
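/* UMV space is full for a vport when its private quota is exhausted and
 * no shared space remains.
 */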
5628 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5629 {
5630         struct hclge_dev *hdev = vport->back;
5631         bool is_full;
5632
5633         mutex_lock(&hdev->umv_mutex);
5634         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5635                    hdev->share_umv_size == 0);
5636         mutex_unlock(&hdev->umv_mutex);
5637
5638         return is_full;
5639 }
5640
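/* Account a unicast entry being added (is_free == false) or removed
 * (is_free == true); entries beyond the private quota are charged to the
 * shared pool.
 */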
5641 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5642 {
5643         struct hclge_dev *hdev = vport->back;
5644
5645         mutex_lock(&hdev->umv_mutex);
5646         if (is_free) {
5647                 if (vport->used_umv_num > hdev->priv_umv_size)
5648                         hdev->share_umv_size++;
5649                 vport->used_umv_num--;
5650         } else {
5651                 if (vport->used_umv_num >= hdev->priv_umv_size)
5652                         hdev->share_umv_size--;
5653                 vport->used_umv_num++;
5654         }
5655         mutex_unlock(&hdev->umv_mutex);
5656 }
5657
5658 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5659                              const unsigned char *addr)
5660 {
5661         struct hclge_vport *vport = hclge_get_vport(handle);
5662
5663         return hclge_add_uc_addr_common(vport, addr);
5664 }
5665
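/* Add a unicast MAC entry for the vport, consuming UMV space when a new
 * table entry is created.
 */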
5666 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5667                              const unsigned char *addr)
5668 {
5669         struct hclge_dev *hdev = vport->back;
5670         struct hclge_mac_vlan_tbl_entry_cmd req;
5671         struct hclge_desc desc;
5672         u16 egress_port = 0;
5673         int ret;
5674
5675         /* mac addr check */
5676         if (is_zero_ether_addr(addr) ||
5677             is_broadcast_ether_addr(addr) ||
5678             is_multicast_ether_addr(addr)) {
5679                 dev_err(&hdev->pdev->dev,
5680                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5681                          addr,
5682                          is_zero_ether_addr(addr),
5683                          is_broadcast_ether_addr(addr),
5684                          is_multicast_ether_addr(addr));
5685                 return -EINVAL;
5686         }
5687
5688         memset(&req, 0, sizeof(req));
5689         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5690
5691         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5692                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5693
5694         req.egress_port = cpu_to_le16(egress_port);
5695
5696         hclge_prepare_mac_addr(&req, addr);
5697
5698         /* Look up the mac address in the mac_vlan table, and add
5699          * it if the entry does not exist. Duplicate unicast entries
5700          * are not allowed in the mac vlan table.
5701          */
5702         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5703         if (ret == -ENOENT) {
5704                 if (!hclge_is_umv_space_full(vport)) {
5705                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5706                         if (!ret)
5707                                 hclge_update_umv_space(vport, false);
5708                         return ret;
5709                 }
5710
5711                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5712                         hdev->priv_umv_size);
5713
5714                 return -ENOSPC;
5715         }
5716
5717         /* check if we just hit the duplicate */
5718         if (!ret)
5719                 ret = -EINVAL;
5720
5721         dev_err(&hdev->pdev->dev,
5722                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5723                 addr);
5724
5725         return ret;
5726 }
5727
5728 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5729                             const unsigned char *addr)
5730 {
5731         struct hclge_vport *vport = hclge_get_vport(handle);
5732
5733         return hclge_rm_uc_addr_common(vport, addr);
5734 }
5735
5736 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5737                             const unsigned char *addr)
5738 {
5739         struct hclge_dev *hdev = vport->back;
5740         struct hclge_mac_vlan_tbl_entry_cmd req;
5741         int ret;
5742
5743         /* mac addr check */
5744         if (is_zero_ether_addr(addr) ||
5745             is_broadcast_ether_addr(addr) ||
5746             is_multicast_ether_addr(addr)) {
5747                 dev_dbg(&hdev->pdev->dev,
5748                         "Remove mac err! invalid mac:%pM.\n",
5749                          addr);
5750                 return -EINVAL;
5751         }
5752
5753         memset(&req, 0, sizeof(req));
5754         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5755         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5756         hclge_prepare_mac_addr(&req, addr);
5757         ret = hclge_remove_mac_vlan_tbl(vport, &req);
5758         if (!ret)
5759                 hclge_update_umv_space(vport, true);
5760
5761         return ret;
5762 }
5763
5764 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5765                              const unsigned char *addr)
5766 {
5767         struct hclge_vport *vport = hclge_get_vport(handle);
5768
5769         return hclge_add_mc_addr_common(vport, addr);
5770 }
5771
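/* Add a multicast MAC entry: if the address already exists, only this
 * vport's VF id is added to it; otherwise a new three-descriptor entry
 * is created.
 */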
5772 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5773                              const unsigned char *addr)
5774 {
5775         struct hclge_dev *hdev = vport->back;
5776         struct hclge_mac_vlan_tbl_entry_cmd req;
5777         struct hclge_desc desc[3];
5778         int status;
5779
5780         /* mac addr check */
5781         if (!is_multicast_ether_addr(addr)) {
5782                 dev_err(&hdev->pdev->dev,
5783                         "Add mc mac err! invalid mac:%pM.\n",
5784                          addr);
5785                 return -EINVAL;
5786         }
5787         memset(&req, 0, sizeof(req));
5788         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5789         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5790         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5791         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5792         hclge_prepare_mac_addr(&req, addr);
5793         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5794         if (!status) {
5795                 /* This mac addr exists, update the VFID for it */
5796                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5797                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5798         } else {
5799                 /* This mac addr does not exist, add a new entry for it */
5800                 memset(desc[0].data, 0, sizeof(desc[0].data));
5801                 memset(desc[1].data, 0, sizeof(desc[0].data));
5802                 memset(desc[2].data, 0, sizeof(desc[0].data));
5803                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5804                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5805         }
5806
5807         if (status == -ENOSPC)
5808                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5809
5810         return status;
5811 }
5812
5813 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5814                             const unsigned char *addr)
5815 {
5816         struct hclge_vport *vport = hclge_get_vport(handle);
5817
5818         return hclge_rm_mc_addr_common(vport, addr);
5819 }
5820
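/* Remove this vport's VF id from a multicast entry and delete the entry
 * once no function references it; addresses covered by the mta table are
 * handled later by update_mta_status.
 */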
5821 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5822                             const unsigned char *addr)
5823 {
5824         struct hclge_dev *hdev = vport->back;
5825         struct hclge_mac_vlan_tbl_entry_cmd req;
5826         enum hclge_cmd_status status;
5827         struct hclge_desc desc[3];
5828
5829         /* mac addr check */
5830         if (!is_multicast_ether_addr(addr)) {
5831                 dev_dbg(&hdev->pdev->dev,
5832                         "Remove mc mac err! invalid mac:%pM.\n",
5833                          addr);
5834                 return -EINVAL;
5835         }
5836
5837         memset(&req, 0, sizeof(req));
5838         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5839         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5840         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5841         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5842         hclge_prepare_mac_addr(&req, addr);
5843         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5844         if (!status) {
5845                 /* This mac addr exists, remove this handle's VFID from it */
5846                 hclge_update_desc_vfid(desc, vport->vport_id, true);
5847
5848                 if (hclge_is_all_function_id_zero(desc))
5849                         /* All the vfids are zero, so delete this entry */
5850                         status = hclge_remove_mac_vlan_tbl(vport, &req);
5851                 else
5852                         /* Not all the vfids are zero, so just update the vfid bitmap */
5853                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5854
5855         } else {
5856                 /* This mac address may be in the mta table, but it cannot
5857                  * be deleted here because an mta entry represents an address
5858                  * range rather than a specific address. The deletion of all
5859                  * such entries takes effect in update_mta_status, called by
5860                  * hns3_nic_set_rx_mode.
5861                  */
5862                 status = 0;
5863         }
5864
5865         return status;
5866 }
5867
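/* Convert the firmware response code of a MAC ethertype command into a
 * standard errno.
 */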
5868 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5869                                               u16 cmdq_resp, u8 resp_code)
5870 {
5871 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
5872 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
5873 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
5874 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
5875
5876         int return_status;
5877
5878         if (cmdq_resp) {
5879                 dev_err(&hdev->pdev->dev,
5880                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5881                         cmdq_resp);
5882                 return -EIO;
5883         }
5884
5885         switch (resp_code) {
5886         case HCLGE_ETHERTYPE_SUCCESS_ADD:
5887         case HCLGE_ETHERTYPE_ALREADY_ADD:
5888                 return_status = 0;
5889                 break;
5890         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5891                 dev_err(&hdev->pdev->dev,
5892                         "add mac ethertype failed for manager table overflow.\n");
5893                 return_status = -EIO;
5894                 break;
5895         case HCLGE_ETHERTYPE_KEY_CONFLICT:
5896                 dev_err(&hdev->pdev->dev,
5897                         "add mac ethertype failed for key conflict.\n");
5898                 return_status = -EIO;
5899                 break;
5900         default:
5901                 dev_err(&hdev->pdev->dev,
5902                         "add mac ethertype failed for undefined, code=%d.\n",
5903                         resp_code);
5904                 return_status = -EIO;
5905         }
5906
5907         return return_status;
5908 }
5909
5910 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5911                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
5912 {
5913         struct hclge_desc desc;
5914         u8 resp_code;
5915         u16 retval;
5916         int ret;
5917
5918         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5919         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5920
5921         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5922         if (ret) {
5923                 dev_err(&hdev->pdev->dev,
5924                         "add mac ethertype failed for cmd_send, ret =%d.\n",
5925                         ret);
5926                 return ret;
5927         }
5928
5929         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5930         retval = le16_to_cpu(desc.retval);
5931
5932         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5933 }
5934
5935 static int init_mgr_tbl(struct hclge_dev *hdev)
5936 {
5937         int ret;
5938         int i;
5939
5940         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5941                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5942                 if (ret) {
5943                         dev_err(&hdev->pdev->dev,
5944                                 "add mac ethertype failed, ret =%d.\n",
5945                                 ret);
5946                         return ret;
5947                 }
5948         }
5949
5950         return 0;
5951 }
5952
5953 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5954 {
5955         struct hclge_vport *vport = hclge_get_vport(handle);
5956         struct hclge_dev *hdev = vport->back;
5957
5958         ether_addr_copy(p, hdev->hw.mac.mac_addr);
5959 }
5960
5961 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5962                               bool is_first)
5963 {
5964         const unsigned char *new_addr = (const unsigned char *)p;
5965         struct hclge_vport *vport = hclge_get_vport(handle);
5966         struct hclge_dev *hdev = vport->back;
5967         int ret;
5968
5969         /* mac addr check */
5970         if (is_zero_ether_addr(new_addr) ||
5971             is_broadcast_ether_addr(new_addr) ||
5972             is_multicast_ether_addr(new_addr)) {
5973                 dev_err(&hdev->pdev->dev,
5974                         "Change uc mac err! invalid mac:%pM.\n",
5975                          new_addr);
5976                 return -EINVAL;
5977         }
5978
5979         if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5980                 dev_warn(&hdev->pdev->dev,
5981                          "remove old uc mac address fail.\n");
5982
5983         ret = hclge_add_uc_addr(handle, new_addr);
5984         if (ret) {
5985                 dev_err(&hdev->pdev->dev,
5986                         "add uc mac address fail, ret =%d.\n",
5987                         ret);
5988
5989                 if (!is_first &&
5990                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5991                         dev_err(&hdev->pdev->dev,
5992                                 "restore uc mac address fail.\n");
5993
5994                 return -EIO;
5995         }
5996
5997         ret = hclge_pause_addr_cfg(hdev, new_addr);
5998         if (ret) {
5999                 dev_err(&hdev->pdev->dev,
6000                         "configure mac pause address fail, ret =%d.\n",
6001                         ret);
6002                 return -EIO;
6003         }
6004
6005         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6006
6007         return 0;
6008 }
6009
6010 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6011                           int cmd)
6012 {
6013         struct hclge_vport *vport = hclge_get_vport(handle);
6014         struct hclge_dev *hdev = vport->back;
6015
6016         if (!hdev->hw.mac.phydev)
6017                 return -EOPNOTSUPP;
6018
6019         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6020 }
6021
6022 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6023                                       u8 fe_type, bool filter_en)
6024 {
6025         struct hclge_vlan_filter_ctrl_cmd *req;
6026         struct hclge_desc desc;
6027         int ret;
6028
6029         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6030
6031         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6032         req->vlan_type = vlan_type;
6033         req->vlan_fe = filter_en ? fe_type : 0;
6034
6035         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6036         if (ret)
6037                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6038                         ret);
6039
6040         return ret;
6041 }
6042
6043 #define HCLGE_FILTER_TYPE_VF            0
6044 #define HCLGE_FILTER_TYPE_PORT          1
6045 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6046 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6047 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6048 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6049 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6050 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6051                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6052 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6053                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6054
6055 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6056 {
6057         struct hclge_vport *vport = hclge_get_vport(handle);
6058         struct hclge_dev *hdev = vport->back;
6059
6060         if (hdev->pdev->revision >= 0x21) {
6061                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6062                                            HCLGE_FILTER_FE_EGRESS, enable);
6063                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6064                                            HCLGE_FILTER_FE_INGRESS, enable);
6065         } else {
6066                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6067                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6068         }
6069         if (enable)
6070                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6071         else
6072                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6073 }
6074
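/* Add or remove a VLAN in the VF VLAN filter table; the VF bitmap spans
 * two command descriptors.
 */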
6075 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6076                                     bool is_kill, u16 vlan, u8 qos,
6077                                     __be16 proto)
6078 {
6079 #define HCLGE_MAX_VF_BYTES  16
6080         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6081         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6082         struct hclge_desc desc[2];
6083         u8 vf_byte_val;
6084         u8 vf_byte_off;
6085         int ret;
6086
6087         hclge_cmd_setup_basic_desc(&desc[0],
6088                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6089         hclge_cmd_setup_basic_desc(&desc[1],
6090                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6091
6092         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6093
6094         vf_byte_off = vfid / 8;
6095         vf_byte_val = 1 << (vfid % 8);
6096
6097         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6098         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6099
6100         req0->vlan_id  = cpu_to_le16(vlan);
6101         req0->vlan_cfg = is_kill;
6102
6103         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6104                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6105         else
6106                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6107
6108         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6109         if (ret) {
6110                 dev_err(&hdev->pdev->dev,
6111                         "Send vf vlan command fail, ret =%d.\n",
6112                         ret);
6113                 return ret;
6114         }
6115
6116         if (!is_kill) {
6117 #define HCLGE_VF_VLAN_NO_ENTRY  2
6118                 if (!req0->resp_code || req0->resp_code == 1)
6119                         return 0;
6120
6121                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6122                         dev_warn(&hdev->pdev->dev,
6123                                  "vf vlan table is full, vf vlan filter is disabled\n");
6124                         return 0;
6125                 }
6126
6127                 dev_err(&hdev->pdev->dev,
6128                         "Add vf vlan filter fail, ret =%d.\n",
6129                         req0->resp_code);
6130         } else {
6131 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6132                 if (!req0->resp_code)
6133                         return 0;
6134
6135                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6136                         dev_warn(&hdev->pdev->dev,
6137                                  "vlan %d filter is not in vf vlan table\n",
6138                                  vlan);
6139                         return 0;
6140                 }
6141
6142                 dev_err(&hdev->pdev->dev,
6143                         "Kill vf vlan filter fail, ret =%d.\n",
6144                         req0->resp_code);
6145         }
6146
6147         return -EIO;
6148 }
6149
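/* Add or remove a VLAN id in the port VLAN filter; the filter table is
 * indexed in groups of 160 VLAN ids, with a bitmap byte selecting the
 * exact id within the group.
 */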
6150 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6151                                       u16 vlan_id, bool is_kill)
6152 {
6153         struct hclge_vlan_filter_pf_cfg_cmd *req;
6154         struct hclge_desc desc;
6155         u8 vlan_offset_byte_val;
6156         u8 vlan_offset_byte;
6157         u8 vlan_offset_160;
6158         int ret;
6159
6160         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6161
6162         vlan_offset_160 = vlan_id / 160;
6163         vlan_offset_byte = (vlan_id % 160) / 8;
6164         vlan_offset_byte_val = 1 << (vlan_id % 8);
6165
6166         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6167         req->vlan_offset = vlan_offset_160;
6168         req->vlan_cfg = is_kill;
6169         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6170
6171         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6172         if (ret)
6173                 dev_err(&hdev->pdev->dev,
6174                         "port vlan command, send fail, ret =%d.\n", ret);
6175         return ret;
6176 }
6177
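/* Apply a VLAN filter change for a vport: update the VF VLAN table, track
 * membership in hdev->vlan_table, and touch the port VLAN table only when
 * the first vport joins or the last vport leaves the VLAN.
 */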
6178 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6179                                     u16 vport_id, u16 vlan_id, u8 qos,
6180                                     bool is_kill)
6181 {
6182         u16 vport_idx, vport_num = 0;
6183         int ret;
6184
6185         if (is_kill && !vlan_id)
6186                 return 0;
6187
6188         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6189                                        0, proto);
6190         if (ret) {
6191                 dev_err(&hdev->pdev->dev,
6192                         "Set %d vport vlan filter config fail, ret =%d.\n",
6193                         vport_id, ret);
6194                 return ret;
6195         }
6196
6197         /* vlan 0 may be added twice when 8021q module is enabled */
6198         if (!is_kill && !vlan_id &&
6199             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6200                 return 0;
6201
6202         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6203                 dev_err(&hdev->pdev->dev,
6204                         "Add port vlan failed, vport %d is already in vlan %d\n",
6205                         vport_id, vlan_id);
6206                 return -EINVAL;
6207         }
6208
6209         if (is_kill &&
6210             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6211                 dev_err(&hdev->pdev->dev,
6212                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6213                         vport_id, vlan_id);
6214                 return -EINVAL;
6215         }
6216
6217         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6218                 vport_num++;
6219
6220         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6221                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6222                                                  is_kill);
6223
6224         return ret;
6225 }
6226
6227 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6228                           u16 vlan_id, bool is_kill)
6229 {
6230         struct hclge_vport *vport = hclge_get_vport(handle);
6231         struct hclge_dev *hdev = vport->back;
6232
6233         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6234                                         0, is_kill);
6235 }
6236
6237 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6238                                     u16 vlan, u8 qos, __be16 proto)
6239 {
6240         struct hclge_vport *vport = hclge_get_vport(handle);
6241         struct hclge_dev *hdev = vport->back;
6242
6243         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6244                 return -EINVAL;
6245         if (proto != htons(ETH_P_8021Q))
6246                 return -EPROTONOSUPPORT;
6247
6248         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6249 }
6250
6251 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6252 {
6253         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6254         struct hclge_vport_vtag_tx_cfg_cmd *req;
6255         struct hclge_dev *hdev = vport->back;
6256         struct hclge_desc desc;
6257         int status;
6258
6259         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6260
6261         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6262         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6263         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6264         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6265                       vcfg->accept_tag1 ? 1 : 0);
6266         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6267                       vcfg->accept_untag1 ? 1 : 0);
6268         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6269                       vcfg->accept_tag2 ? 1 : 0);
6270         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6271                       vcfg->accept_untag2 ? 1 : 0);
6272         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6273                       vcfg->insert_tag1_en ? 1 : 0);
6274         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6275                       vcfg->insert_tag2_en ? 1 : 0);
6276         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6277
6278         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6279         req->vf_bitmap[req->vf_offset] =
6280                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6281
6282         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6283         if (status)
6284                 dev_err(&hdev->pdev->dev,
6285                         "Send port txvlan cfg command fail, ret =%d\n",
6286                         status);
6287
6288         return status;
6289 }
6290
6291 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6292 {
6293         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6294         struct hclge_vport_vtag_rx_cfg_cmd *req;
6295         struct hclge_dev *hdev = vport->back;
6296         struct hclge_desc desc;
6297         int status;
6298
6299         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6300
6301         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6302         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6303                       vcfg->strip_tag1_en ? 1 : 0);
6304         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6305                       vcfg->strip_tag2_en ? 1 : 0);
6306         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6307                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6308         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6309                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6310
6311         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6312         req->vf_bitmap[req->vf_offset] =
6313                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6314
6315         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6316         if (status)
6317                 dev_err(&hdev->pdev->dev,
6318                         "Send port rxvlan cfg command fail, ret =%d\n",
6319                         status);
6320
6321         return status;
6322 }
6323
6324 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6325 {
6326         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6327         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6328         struct hclge_desc desc;
6329         int status;
6330
6331         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6332         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6333         rx_req->ot_fst_vlan_type =
6334                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6335         rx_req->ot_sec_vlan_type =
6336                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6337         rx_req->in_fst_vlan_type =
6338                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6339         rx_req->in_sec_vlan_type =
6340                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6341
6342         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6343         if (status) {
6344                 dev_err(&hdev->pdev->dev,
6345                         "Send rxvlan protocol type command fail, ret =%d\n",
6346                         status);
6347                 return status;
6348         }
6349
6350         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6351
6352         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6353         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6354         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6355
6356         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6357         if (status)
6358                 dev_err(&hdev->pdev->dev,
6359                         "Send txvlan protocol type command fail, ret =%d\n",
6360                         status);
6361
6362         return status;
6363 }
6364
6365 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6366 {
6367 #define HCLGE_DEF_VLAN_TYPE             0x8100
6368
6369         struct hnae3_handle *handle = &hdev->vport[0].nic;
6370         struct hclge_vport *vport;
6371         int ret;
6372         int i;
6373
6374         if (hdev->pdev->revision >= 0x21) {
6375                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6376                                                  HCLGE_FILTER_FE_EGRESS, true);
6377                 if (ret)
6378                         return ret;
6379
6380                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6381                                                  HCLGE_FILTER_FE_INGRESS, true);
6382                 if (ret)
6383                         return ret;
6384         } else {
6385                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6386                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6387                                                  true);
6388                 if (ret)
6389                         return ret;
6390         }
6391
6392         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6393
6394         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6395         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6396         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6397         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6398         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6399         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6400
6401         ret = hclge_set_vlan_protocol_type(hdev);
6402         if (ret)
6403                 return ret;
6404
6405         for (i = 0; i < hdev->num_alloc_vport; i++) {
6406                 vport = &hdev->vport[i];
6407                 vport->txvlan_cfg.accept_tag1 = true;
6408                 vport->txvlan_cfg.accept_untag1 = true;
6409
6410                 /* accept_tag2 and accept_untag2 are not supported on
6411                  * pdev revision(0x20); newer revisions support them. Setting
6412                  * these two fields does not cause an error when the driver
6413                  * sends the command to firmware on revision(0x20).
6414                  * These two fields cannot be configured by the user.
6415                  */
6416                 vport->txvlan_cfg.accept_tag2 = true;
6417                 vport->txvlan_cfg.accept_untag2 = true;
6418
6419                 vport->txvlan_cfg.insert_tag1_en = false;
6420                 vport->txvlan_cfg.insert_tag2_en = false;
6421                 vport->txvlan_cfg.default_tag1 = 0;
6422                 vport->txvlan_cfg.default_tag2 = 0;
6423
6424                 ret = hclge_set_vlan_tx_offload_cfg(vport);
6425                 if (ret)
6426                         return ret;
6427
6428                 vport->rxvlan_cfg.strip_tag1_en = false;
6429                 vport->rxvlan_cfg.strip_tag2_en = true;
6430                 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6431                 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6432
6433                 ret = hclge_set_vlan_rx_offload_cfg(vport);
6434                 if (ret)
6435                         return ret;
6436         }
6437
6438         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6439 }
6440
6441 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6442 {
6443         struct hclge_vport *vport = hclge_get_vport(handle);
6444
6445         vport->rxvlan_cfg.strip_tag1_en = false;
6446         vport->rxvlan_cfg.strip_tag2_en = enable;
6447         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6448         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6449
6450         return hclge_set_vlan_rx_offload_cfg(vport);
6451 }
6452
6453 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6454 {
6455         struct hclge_config_max_frm_size_cmd *req;
6456         struct hclge_desc desc;
6457
6458         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6459
6460         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6461         req->max_frm_size = cpu_to_le16(new_mps);
6462         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6463
6464         return hclge_cmd_send(&hdev->hw, &desc, 1);
6465 }
6466
6467 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6468 {
6469         struct hclge_vport *vport = hclge_get_vport(handle);
6470
6471         return hclge_set_vport_mtu(vport, new_mtu);
6472 }
6473
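/* Set the MTU of a vport. A VF only records its own mps, which must fit
 * within the PF's; the PF additionally reprograms the MAC max frame size
 * and reallocates packet buffers.
 */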
6474 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6475 {
6476         struct hclge_dev *hdev = vport->back;
6477         int i, max_frm_size, ret = 0;
6478
6479         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6480         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6481             max_frm_size > HCLGE_MAC_MAX_FRAME)
6482                 return -EINVAL;
6483
6484         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6485         mutex_lock(&hdev->vport_lock);
6486         /* VF's mps must fit within hdev->mps */
6487         if (vport->vport_id && max_frm_size > hdev->mps) {
6488                 mutex_unlock(&hdev->vport_lock);
6489                 return -EINVAL;
6490         } else if (vport->vport_id) {
6491                 vport->mps = max_frm_size;
6492                 mutex_unlock(&hdev->vport_lock);
6493                 return 0;
6494         }
6495
6496         /* PF's mps must be greater than or equal to every VF's mps */
6497         for (i = 1; i < hdev->num_alloc_vport; i++)
6498                 if (max_frm_size < hdev->vport[i].mps) {
6499                         mutex_unlock(&hdev->vport_lock);
6500                         return -EINVAL;
6501                 }
6502
6503         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6504
6505         ret = hclge_set_mac_mtu(hdev, max_frm_size);
6506         if (ret) {
6507                 dev_err(&hdev->pdev->dev,
6508                         "Change mtu fail, ret =%d\n", ret);
6509                 goto out;
6510         }
6511
6512         hdev->mps = max_frm_size;
6513         vport->mps = max_frm_size;
6514
6515         ret = hclge_buffer_alloc(hdev);
6516         if (ret)
6517                 dev_err(&hdev->pdev->dev,
6518                         "Allocate buffer fail, ret =%d\n", ret);
6519
6520 out:
6521         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6522         mutex_unlock(&hdev->vport_lock);
6523         return ret;
6524 }
6525
6526 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6527                                     bool enable)
6528 {
6529         struct hclge_reset_tqp_queue_cmd *req;
6530         struct hclge_desc desc;
6531         int ret;
6532
6533         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6534
6535         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6536         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6537         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6538
6539         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6540         if (ret) {
6541                 dev_err(&hdev->pdev->dev,
6542                         "Send tqp reset cmd error, status =%d\n", ret);
6543                 return ret;
6544         }
6545
6546         return 0;
6547 }
6548
6549 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6550 {
6551         struct hclge_reset_tqp_queue_cmd *req;
6552         struct hclge_desc desc;
6553         int ret;
6554
6555         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6556
6557         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6558         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6559
6560         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6561         if (ret) {
6562                 dev_err(&hdev->pdev->dev,
6563                         "Get reset status error, status =%d\n", ret);
6564                 return ret;
6565         }
6566
6567         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6568 }
6569
6570 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6571                                           u16 queue_id)
6572 {
6573         struct hnae3_queue *queue;
6574         struct hclge_tqp *tqp;
6575
6576         queue = handle->kinfo.tqp[queue_id];
6577         tqp = container_of(queue, struct hclge_tqp, q);
6578
6579         return tqp->index;
6580 }
6581
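/* Reset a TQP: disable the queue, assert the reset, poll until hardware
 * reports the queue ready, then deassert the reset.
 */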
6582 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6583 {
6584         struct hclge_vport *vport = hclge_get_vport(handle);
6585         struct hclge_dev *hdev = vport->back;
6586         int reset_try_times = 0;
6587         int reset_status;
6588         u16 queue_gid;
6589         int ret = 0;
6590
6591         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6592
6593         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6594         if (ret) {
6595                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6596                 return ret;
6597         }
6598
6599         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6600         if (ret) {
6601                 dev_err(&hdev->pdev->dev,
6602                         "Send reset tqp cmd fail, ret = %d\n", ret);
6603                 return ret;
6604         }
6605
6606         reset_try_times = 0;
6607         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6608                 /* Wait for tqp hw reset */
6609                 msleep(20);
6610                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6611                 if (reset_status)
6612                         break;
6613         }
6614
6615         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6616                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6617                 return ret;
6618         }
6619
6620         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6621         if (ret)
6622                 dev_err(&hdev->pdev->dev,
6623                         "Deassert the soft reset fail, ret = %d\n", ret);
6624
6625         return ret;
6626 }
6627
6628 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6629 {
6630         struct hclge_dev *hdev = vport->back;
6631         int reset_try_times = 0;
6632         int reset_status;
6633         u16 queue_gid;
6634         int ret;
6635
6636         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6637
6638         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6639         if (ret) {
6640                 dev_warn(&hdev->pdev->dev,
6641                          "Send reset tqp cmd fail, ret = %d\n", ret);
6642                 return;
6643         }
6644
6645         reset_try_times = 0;
6646         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6647                 /* Wait for tqp hw reset */
6648                 msleep(20);
6649                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6650                 if (reset_status)
6651                         break;
6652         }
6653
6654         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6655                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6656                 return;
6657         }
6658
6659         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6660         if (ret)
6661                 dev_warn(&hdev->pdev->dev,
6662                          "Deassert the soft reset fail, ret = %d\n", ret);
6663 }
6664
6665 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6666 {
6667         struct hclge_vport *vport = hclge_get_vport(handle);
6668         struct hclge_dev *hdev = vport->back;
6669
6670         return hdev->fw_version;
6671 }
6672
6673 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6674 {
6675         struct phy_device *phydev = hdev->hw.mac.phydev;
6676
6677         if (!phydev)
6678                 return;
6679
6680         phy_set_asym_pause(phydev, rx_en, tx_en);
6681 }
6682
6683 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6684 {
6685         int ret;
6686
6687         if (rx_en && tx_en)
6688                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6689         else if (rx_en && !tx_en)
6690                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6691         else if (!rx_en && tx_en)
6692                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6693         else
6694                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6695
6696         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6697                 return 0;
6698
6699         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6700         if (ret) {
6701                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6702                         ret);
6703                 return ret;
6704         }
6705
6706         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6707
6708         return 0;
6709 }
6710
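/* Resolve pause settings from the local and link-partner autoneg
 * advertisements and program them into the MAC.
 */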
6711 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6712 {
6713         struct phy_device *phydev = hdev->hw.mac.phydev;
6714         u16 remote_advertising = 0;
6715         u16 local_advertising = 0;
6716         u32 rx_pause, tx_pause;
6717         u8 flowctl;
6718
6719         if (!phydev->link || !phydev->autoneg)
6720                 return 0;
6721
6722         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6723
6724         if (phydev->pause)
6725                 remote_advertising = LPA_PAUSE_CAP;
6726
6727         if (phydev->asym_pause)
6728                 remote_advertising |= LPA_PAUSE_ASYM;
6729
6730         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6731                                            remote_advertising);
6732         tx_pause = flowctl & FLOW_CTRL_TX;
6733         rx_pause = flowctl & FLOW_CTRL_RX;
6734
6735         if (phydev->duplex == HCLGE_MAC_HALF) {
6736                 tx_pause = 0;
6737                 rx_pause = 0;
6738         }
6739
6740         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6741 }
6742
6743 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6744                                  u32 *rx_en, u32 *tx_en)
6745 {
6746         struct hclge_vport *vport = hclge_get_vport(handle);
6747         struct hclge_dev *hdev = vport->back;
6748
6749         *auto_neg = hclge_get_autoneg(handle);
6750
6751         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6752                 *rx_en = 0;
6753                 *tx_en = 0;
6754                 return;
6755         }
6756
6757         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6758                 *rx_en = 1;
6759                 *tx_en = 0;
6760         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6761                 *tx_en = 1;
6762                 *rx_en = 0;
6763         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6764                 *rx_en = 1;
6765                 *tx_en = 1;
6766         } else {
6767                 *rx_en = 0;
6768                 *tx_en = 0;
6769         }
6770 }
6771
6772 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6773                                 u32 rx_en, u32 tx_en)
6774 {
6775         struct hclge_vport *vport = hclge_get_vport(handle);
6776         struct hclge_dev *hdev = vport->back;
6777         struct phy_device *phydev = hdev->hw.mac.phydev;
6778         u32 fc_autoneg;
6779
6780         fc_autoneg = hclge_get_autoneg(handle);
6781         if (auto_neg != fc_autoneg) {
6782                 dev_info(&hdev->pdev->dev,
6783                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6784                 return -EOPNOTSUPP;
6785         }
6786
6787         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6788                 dev_info(&hdev->pdev->dev,
6789                          "Priority flow control enabled. Cannot set link flow control.\n");
6790                 return -EOPNOTSUPP;
6791         }
6792
6793         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6794
6795         if (!fc_autoneg)
6796                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6797
6798         /* Only support flow control negotiation for netdev with
6799          * phy attached for now.
6800          */
6801         if (!phydev)
6802                 return -EOPNOTSUPP;
6803
6804         return phy_start_aneg(phydev);
6805 }
6806
6807 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6808                                           u8 *auto_neg, u32 *speed, u8 *duplex)
6809 {
6810         struct hclge_vport *vport = hclge_get_vport(handle);
6811         struct hclge_dev *hdev = vport->back;
6812
6813         if (speed)
6814                 *speed = hdev->hw.mac.speed;
6815         if (duplex)
6816                 *duplex = hdev->hw.mac.duplex;
6817         if (auto_neg)
6818                 *auto_neg = hdev->hw.mac.autoneg;
6819 }
6820
6821 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6822 {
6823         struct hclge_vport *vport = hclge_get_vport(handle);
6824         struct hclge_dev *hdev = vport->back;
6825
6826         if (media_type)
6827                 *media_type = hdev->hw.mac.media_type;
6828 }
6829
6830 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6831                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
6832 {
6833         struct hclge_vport *vport = hclge_get_vport(handle);
6834         struct hclge_dev *hdev = vport->back;
6835         struct phy_device *phydev = hdev->hw.mac.phydev;
6836         int mdix_ctrl, mdix, retval, is_resolved;
6837
6838         if (!phydev) {
6839                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6840                 *tp_mdix = ETH_TP_MDI_INVALID;
6841                 return;
6842         }
6843
6844         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6845
6846         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6847         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6848                                     HCLGE_PHY_MDIX_CTRL_S);
6849
6850         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6851         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6852         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6853
6854         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6855
6856         switch (mdix_ctrl) {
6857         case 0x0:
6858                 *tp_mdix_ctrl = ETH_TP_MDI;
6859                 break;
6860         case 0x1:
6861                 *tp_mdix_ctrl = ETH_TP_MDI_X;
6862                 break;
6863         case 0x3:
6864                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6865                 break;
6866         default:
6867                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6868                 break;
6869         }
6870
6871         if (!is_resolved)
6872                 *tp_mdix = ETH_TP_MDI_INVALID;
6873         else if (mdix)
6874                 *tp_mdix = ETH_TP_MDI_X;
6875         else
6876                 *tp_mdix = ETH_TP_MDI;
6877 }
6878
6879 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6880 {
6881         return hclge_mac_connect_phy(hdev);
6882 }
6883
6884 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6885 {
6886         hclge_mac_disconnect_phy(hdev);
6887 }
6888
6889 static int hclge_init_client_instance(struct hnae3_client *client,
6890                                       struct hnae3_ae_dev *ae_dev)
6891 {
6892         struct hclge_dev *hdev = ae_dev->priv;
6893         struct hclge_vport *vport;
6894         int i, ret;
6895
6896         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
6897                 vport = &hdev->vport[i];
6898
6899                 switch (client->type) {
6900                 case HNAE3_CLIENT_KNIC:
6901
6902                         hdev->nic_client = client;
6903                         vport->nic.client = client;
6904                         ret = client->ops->init_instance(&vport->nic);
6905                         if (ret)
6906                                 goto clear_nic;
6907
6908                         ret = hclge_init_instance_hw(hdev);
6909                         if (ret) {
6910                                 client->ops->uninit_instance(&vport->nic,
6911                                                              0);
6912                                 goto clear_nic;
6913                         }
6914
6915                         hnae3_set_client_init_flag(client, ae_dev, 1);
6916
6917                         if (hdev->roce_client &&
6918                             hnae3_dev_roce_supported(hdev)) {
6919                                 struct hnae3_client *rc = hdev->roce_client;
6920
6921                                 ret = hclge_init_roce_base_info(vport);
6922                                 if (ret)
6923                                         goto clear_roce;
6924
6925                                 ret = rc->ops->init_instance(&vport->roce);
6926                                 if (ret)
6927                                         goto clear_roce;
6928
6929                                 hnae3_set_client_init_flag(hdev->roce_client,
6930                                                            ae_dev, 1);
6931                         }
6932
6933                         break;
6934                 case HNAE3_CLIENT_UNIC:
6935                         hdev->nic_client = client;
6936                         vport->nic.client = client;
6937
6938                         ret = client->ops->init_instance(&vport->nic);
6939                         if (ret)
6940                                 goto clear_nic;
6941
6942                         hnae3_set_client_init_flag(client, ae_dev, 1);
6943
6944                         break;
6945                 case HNAE3_CLIENT_ROCE:
6946                         if (hnae3_dev_roce_supported(hdev)) {
6947                                 hdev->roce_client = client;
6948                                 vport->roce.client = client;
6949                         }
6950
6951                         if (hdev->roce_client && hdev->nic_client) {
6952                                 ret = hclge_init_roce_base_info(vport);
6953                                 if (ret)
6954                                         goto clear_roce;
6955
6956                                 ret = client->ops->init_instance(&vport->roce);
6957                                 if (ret)
6958                                         goto clear_roce;
6959
6960                                 hnae3_set_client_init_flag(client, ae_dev, 1);
6961                         }
6962
6963                         break;
6964                 default:
6965                         return -EINVAL;
6966                 }
6967         }
6968
6969         return 0;
6970
6971 clear_nic:
6972         hdev->nic_client = NULL;
6973         vport->nic.client = NULL;
6974         return ret;
6975 clear_roce:
6976         hdev->roce_client = NULL;
6977         vport->roce.client = NULL;
6978         return ret;
6979 }
6980
6981 static void hclge_uninit_client_instance(struct hnae3_client *client,
6982                                          struct hnae3_ae_dev *ae_dev)
6983 {
6984         struct hclge_dev *hdev = ae_dev->priv;
6985         struct hclge_vport *vport;
6986         int i;
6987
6988         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6989                 vport = &hdev->vport[i];
6990                 if (hdev->roce_client) {
6991                         hdev->roce_client->ops->uninit_instance(&vport->roce,
6992                                                                 0);
6993                         hdev->roce_client = NULL;
6994                         vport->roce.client = NULL;
6995                 }
6996                 if (client->type == HNAE3_CLIENT_ROCE)
6997                         return;
6998                 if (hdev->nic_client && client->ops->uninit_instance) {
6999                         hclge_uninit_instance_hw(hdev);
7000                         client->ops->uninit_instance(&vport->nic, 0);
7001                         hdev->nic_client = NULL;
7002                         vport->nic.client = NULL;
7003                 }
7004         }
7005 }
7006
7007 static int hclge_pci_init(struct hclge_dev *hdev)
7008 {
7009         struct pci_dev *pdev = hdev->pdev;
7010         struct hclge_hw *hw;
7011         int ret;
7012
7013         ret = pci_enable_device(pdev);
7014         if (ret) {
7015                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7016                 return ret;
7017         }
7018
7019         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7020         if (ret) {
7021                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7022                 if (ret) {
7023                         dev_err(&pdev->dev,
7024                                 "can't set consistent PCI DMA\n");
7025                         goto err_disable_device;
7026                 }
7027                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7028         }
7029
7030         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7031         if (ret) {
7032                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7033                 goto err_disable_device;
7034         }
7035
7036         pci_set_master(pdev);
7037         hw = &hdev->hw;
7038         hw->io_base = pcim_iomap(pdev, 2, 0);
7039         if (!hw->io_base) {
7040                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7041                 ret = -ENOMEM;
7042                 goto err_clr_master;
7043         }
7044
7045         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7046
7047         return 0;
7048 err_clr_master:
7049         pci_clear_master(pdev);
7050         pci_release_regions(pdev);
7051 err_disable_device:
7052         pci_disable_device(pdev);
7053
7054         return ret;
7055 }
7056
7057 static void hclge_pci_uninit(struct hclge_dev *hdev)
7058 {
7059         struct pci_dev *pdev = hdev->pdev;
7060
7061         pcim_iounmap(pdev, hdev->hw.io_base);
7062         pci_free_irq_vectors(pdev);
7063         pci_clear_master(pdev);
7064         pci_release_mem_regions(pdev);
7065         pci_disable_device(pdev);
7066 }
7067
7068 static void hclge_state_init(struct hclge_dev *hdev)
7069 {
7070         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7071         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7072         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7073         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7074         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7075         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7076 }
7077
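/* Mark the device down and make sure no timer or service/reset/mailbox
 * work item is still running before teardown continues.
 */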
7078 static void hclge_state_uninit(struct hclge_dev *hdev)
7079 {
7080         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7081
7082         if (hdev->service_timer.function)
7083                 del_timer_sync(&hdev->service_timer);
7084         if (hdev->reset_timer.function)
7085                 del_timer_sync(&hdev->reset_timer);
7086         if (hdev->service_task.func)
7087                 cancel_work_sync(&hdev->service_task);
7088         if (hdev->rst_service_task.func)
7089                 cancel_work_sync(&hdev->rst_service_task);
7090         if (hdev->mbx_service_task.func)
7091                 cancel_work_sync(&hdev->mbx_service_task);
7092 }
7093
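/* Trigger a function-level reset and poll for the reset task to bring the
 * function down, waiting at most HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS
 * (5 seconds) before reporting a timeout.
 */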
7094 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7095 {
7096 #define HCLGE_FLR_WAIT_MS       100
7097 #define HCLGE_FLR_WAIT_CNT      50
7098         struct hclge_dev *hdev = ae_dev->priv;
7099         int cnt = 0;
7100
7101         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7102         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7103         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7104         hclge_reset_event(hdev->pdev, NULL);
7105
7106         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7107                cnt++ < HCLGE_FLR_WAIT_CNT)
7108                 msleep(HCLGE_FLR_WAIT_MS);
7109
7110         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7111                 dev_err(&hdev->pdev->dev,
7112                         "flr wait down timeout: %d\n", cnt);
7113 }
7114
7115 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7116 {
7117         struct hclge_dev *hdev = ae_dev->priv;
7118
7119         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7120 }
7121
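/* Bring up the PF: PCI and DMA setup, firmware command queue and command
 * interface, MSI-X plus the misc (vector0) interrupt, TQP and vport
 * allocation, then MAC, VLAN, TM, RSS, manager table and flow director
 * configuration before the service, reset and mailbox tasks are set up.
 */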
7122 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7123 {
7124         struct pci_dev *pdev = ae_dev->pdev;
7125         struct hclge_dev *hdev;
7126         int ret;
7127
7128         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7129         if (!hdev) {
7130                 ret = -ENOMEM;
7131                 goto out;
7132         }
7133
7134         hdev->pdev = pdev;
7135         hdev->ae_dev = ae_dev;
7136         hdev->reset_type = HNAE3_NONE_RESET;
7137         hdev->reset_level = HNAE3_FUNC_RESET;
7138         ae_dev->priv = hdev;
7139         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7140
7141         mutex_init(&hdev->vport_lock);
7142
7143         ret = hclge_pci_init(hdev);
7144         if (ret) {
7145                 dev_err(&pdev->dev, "PCI init failed\n");
7146                 goto out;
7147         }
7148
7149         /* Initialize the firmware command queue */
7150         ret = hclge_cmd_queue_init(hdev);
7151         if (ret) {
7152                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7153                 goto err_pci_uninit;
7154         }
7155
7156         /* Initialize the firmware command interface */
7157         ret = hclge_cmd_init(hdev);
7158         if (ret)
7159                 goto err_cmd_uninit;
7160
7161         ret = hclge_get_cap(hdev);
7162         if (ret) {
7163                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7164                         ret);
7165                 goto err_cmd_uninit;
7166         }
7167
7168         ret = hclge_configure(hdev);
7169         if (ret) {
7170                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7171                 goto err_cmd_uninit;
7172         }
7173
7174         ret = hclge_init_msi(hdev);
7175         if (ret) {
7176                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7177                 goto err_cmd_uninit;
7178         }
7179
7180         ret = hclge_misc_irq_init(hdev);
7181         if (ret) {
7182                 dev_err(&pdev->dev,
7183                         "Misc IRQ(vector0) init error, ret = %d.\n",
7184                         ret);
7185                 goto err_msi_uninit;
7186         }
7187
7188         ret = hclge_alloc_tqps(hdev);
7189         if (ret) {
7190                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7191                 goto err_msi_irq_uninit;
7192         }
7193
7194         ret = hclge_alloc_vport(hdev);
7195         if (ret) {
7196                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7197                 goto err_msi_irq_uninit;
7198         }
7199
7200         ret = hclge_map_tqp(hdev);
7201         if (ret) {
7202                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7203                 goto err_msi_irq_uninit;
7204         }
7205
7206         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7207                 ret = hclge_mac_mdio_config(hdev);
7208                 if (ret) {
7209                         dev_err(&hdev->pdev->dev,
7210                                 "mdio config fail ret=%d\n", ret);
7211                         goto err_msi_irq_uninit;
7212                 }
7213         }
7214
7215         ret = hclge_init_umv_space(hdev);
7216         if (ret) {
7217                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7218                 goto err_msi_irq_uninit;
7219         }
7220
7221         ret = hclge_mac_init(hdev);
7222         if (ret) {
7223                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7224                 goto err_mdiobus_unreg;
7225         }
7226
7227         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7228         if (ret) {
7229                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7230                 goto err_mdiobus_unreg;
7231         }
7232
7233         ret = hclge_config_gro(hdev, true);
7234         if (ret)
7235                 goto err_mdiobus_unreg;
7236
7237         ret = hclge_init_vlan_config(hdev);
7238         if (ret) {
7239                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7240                 goto err_mdiobus_unreg;
7241         }
7242
7243         ret = hclge_tm_schd_init(hdev);
7244         if (ret) {
7245                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7246                 goto err_mdiobus_unreg;
7247         }
7248
7249         hclge_rss_init_cfg(hdev);
7250         ret = hclge_rss_init_hw(hdev);
7251         if (ret) {
7252                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7253                 goto err_mdiobus_unreg;
7254         }
7255
7256         ret = init_mgr_tbl(hdev);
7257         if (ret) {
7258                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7259                 goto err_mdiobus_unreg;
7260         }
7261
7262         ret = hclge_init_fd_config(hdev);
7263         if (ret) {
7264                 dev_err(&pdev->dev,
7265                         "fd table init fail, ret=%d\n", ret);
7266                 goto err_mdiobus_unreg;
7267         }
7268
7269         ret = hclge_hw_error_set_state(hdev, true);
7270         if (ret) {
7271                 dev_err(&pdev->dev,
7272                         "hw error interrupts enable failed, ret =%d\n", ret);
7273                 goto err_mdiobus_unreg;
7274         }
7275
7276         hclge_dcb_ops_set(hdev);
7277
7278         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7279         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7280         INIT_WORK(&hdev->service_task, hclge_service_task);
7281         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7282         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7283
7284         hclge_clear_all_event_cause(hdev);
7285
7286         /* Enable MISC vector(vector0) */
7287         hclge_enable_vector(&hdev->misc_vector, true);
7288
7289         hclge_state_init(hdev);
7290         hdev->last_reset_time = jiffies;
7291
7292         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7293         return 0;
7294
7295 err_mdiobus_unreg:
7296         if (hdev->hw.mac.phydev)
7297                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7298 err_msi_irq_uninit:
7299         hclge_misc_irq_uninit(hdev);
7300 err_msi_uninit:
7301         pci_free_irq_vectors(pdev);
7302 err_cmd_uninit:
7303         hclge_destroy_cmd_queue(&hdev->hw);
7304 err_pci_uninit:
7305         pcim_iounmap(pdev, hdev->hw.io_base);
7306         pci_clear_master(pdev);
7307         pci_release_regions(pdev);
7308         pci_disable_device(pdev);
7309 out:
7310         return ret;
7311 }
7312
7313 static void hclge_stats_clear(struct hclge_dev *hdev)
7314 {
7315         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7316 }
7317
7318 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7319 {
7320         struct hclge_vport *vport = hdev->vport;
7321         int i;
7322
7323         for (i = 0; i < hdev->num_alloc_vport; i++) {
7324                 hclge_vport_start(vport);
7325                 vport++;
7326         }
7327 }
7328
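/* Re-initialize the hardware after a reset.  The software structures
 * (vports, TQPs, work items) survive the reset, so only the command
 * interface, the hardware tables and the vport state are reprogrammed.
 */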
7329 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7330 {
7331         struct hclge_dev *hdev = ae_dev->priv;
7332         struct pci_dev *pdev = ae_dev->pdev;
7333         int ret;
7334
7335         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7336
7337         hclge_stats_clear(hdev);
7338         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7339
7340         ret = hclge_cmd_init(hdev);
7341         if (ret) {
7342                 dev_err(&pdev->dev, "Cmd init failed, ret = %d\n", ret);
7343                 return ret;
7344         }
7345
7346         ret = hclge_get_cap(hdev);
7347         if (ret) {
7348                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7349                         ret);
7350                 return ret;
7351         }
7352
7353         ret = hclge_configure(hdev);
7354         if (ret) {
7355                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7356                 return ret;
7357         }
7358
7359         ret = hclge_map_tqp(hdev);
7360         if (ret) {
7361                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7362                 return ret;
7363         }
7364
7365         hclge_reset_umv_space(hdev);
7366
7367         ret = hclge_mac_init(hdev);
7368         if (ret) {
7369                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7370                 return ret;
7371         }
7372
7373         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7374         if (ret) {
7375                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7376                 return ret;
7377         }
7378
7379         ret = hclge_config_gro(hdev, true);
7380         if (ret)
7381                 return ret;
7382
7383         ret = hclge_init_vlan_config(hdev);
7384         if (ret) {
7385                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7386                 return ret;
7387         }
7388
7389         ret = hclge_tm_init_hw(hdev);
7390         if (ret) {
7391                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7392                 return ret;
7393         }
7394
7395         ret = hclge_rss_init_hw(hdev);
7396         if (ret) {
7397                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7398                 return ret;
7399         }
7400
7401         ret = hclge_init_fd_config(hdev);
7402         if (ret) {
7403                 dev_err(&pdev->dev,
7404                         "fd table init fail, ret=%d\n", ret);
7405                 return ret;
7406         }
7407
7408         /* Re-enable the TM hw error interrupts because
7409          * they get disabled on core/global reset.
7410          */
7411         if (hclge_enable_tm_hw_error(hdev, true))
7412                 dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
7413
7414         hclge_reset_vport_state(hdev);
7415
7416         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7417                  HCLGE_DRIVER_NAME);
7418
7419         return 0;
7420 }
7421
7422 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7423 {
7424         struct hclge_dev *hdev = ae_dev->priv;
7425         struct hclge_mac *mac = &hdev->hw.mac;
7426
7427         hclge_state_uninit(hdev);
7428
7429         if (mac->phydev)
7430                 mdiobus_unregister(mac->mdio_bus);
7431
7432         hclge_uninit_umv_space(hdev);
7433
7434         /* Disable MISC vector(vector0) */
7435         hclge_enable_vector(&hdev->misc_vector, false);
7436         synchronize_irq(hdev->misc_vector.vector_irq);
7437
7438         hclge_hw_error_set_state(hdev, false);
7439         hclge_destroy_cmd_queue(&hdev->hw);
7440         hclge_misc_irq_uninit(hdev);
7441         hclge_pci_uninit(hdev);
7442         mutex_destroy(&hdev->vport_lock);
7443         ae_dev->priv = NULL;
7444 }
7445
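/* The maximum "combined" channel count is bounded both by the per-TC RSS
 * size limit and by the number of TQPs assigned to this function.
 */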
7446 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7447 {
7448         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7449         struct hclge_vport *vport = hclge_get_vport(handle);
7450         struct hclge_dev *hdev = vport->back;
7451
7452         return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
7453 }
7454
7455 static void hclge_get_channels(struct hnae3_handle *handle,
7456                                struct ethtool_channels *ch)
7457 {
7458         struct hclge_vport *vport = hclge_get_vport(handle);
7459
7460         ch->max_combined = hclge_get_max_channels(handle);
7461         ch->other_count = 1;
7462         ch->max_other = 1;
7463         ch->combined_count = vport->alloc_tqps;
7464 }
7465
7466 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7467                                         u16 *alloc_tqps, u16 *max_rss_size)
7468 {
7469         struct hclge_vport *vport = hclge_get_vport(handle);
7470         struct hclge_dev *hdev = vport->back;
7471
7472         *alloc_tqps = vport->alloc_tqps;
7473         *max_rss_size = hdev->rss_size_max;
7474 }
7475
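/* Detach every TQP from the vport's knic info and free the kinfo->tqp
 * array so the queues can be reallocated with a different count.
 */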
7476 static void hclge_release_tqp(struct hclge_vport *vport)
7477 {
7478         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7479         struct hclge_dev *hdev = vport->back;
7480         int i;
7481
7482         for (i = 0; i < kinfo->num_tqps; i++) {
7483                 struct hclge_tqp *tqp =
7484                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
7485
7486                 tqp->q.handle = NULL;
7487                 tqp->q.tqp_index = 0;
7488                 tqp->alloced = false;
7489         }
7490
7491         devm_kfree(&hdev->pdev->dev, kinfo->tqp);
7492         kinfo->tqp = NULL;
7493 }
7494
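/* Change the number of combined channels: release the old TQPs, set up
 * the NIC again with the new queue count, re-map the TQPs to the vport,
 * redo TM scheduler init and reprogram the RSS TC mode and indirection
 * table for the resulting rss_size.
 */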
7495 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
7496 {
7497         struct hclge_vport *vport = hclge_get_vport(handle);
7498         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7499         struct hclge_dev *hdev = vport->back;
7500         int cur_rss_size = kinfo->rss_size;
7501         int cur_tqps = kinfo->num_tqps;
7502         u16 tc_offset[HCLGE_MAX_TC_NUM];
7503         u16 tc_valid[HCLGE_MAX_TC_NUM];
7504         u16 tc_size[HCLGE_MAX_TC_NUM];
7505         u16 roundup_size;
7506         u32 *rss_indir;
7507         int ret, i;
7508
7509         /* Free the old TQPs and reallocate with the new queue count in NIC setup */
7510         hclge_release_tqp(vport);
7511
7512         ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
7513         if (ret) {
7514                 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
7515                 return ret;
7516         }
7517
7518         ret = hclge_map_tqp_to_vport(hdev, vport);
7519         if (ret) {
7520                 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
7521                 return ret;
7522         }
7523
7524         ret = hclge_tm_schd_init(hdev);
7525         if (ret) {
7526                 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
7527                 return ret;
7528         }
7529
7530         roundup_size = roundup_pow_of_two(kinfo->rss_size);
7531         roundup_size = ilog2(roundup_size);
7532         /* Set the RSS TC mode according to the new RSS size */
7533         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7534                 tc_valid[i] = 0;
7535
7536                 if (!(hdev->hw_tc_map & BIT(i)))
7537                         continue;
7538
7539                 tc_valid[i] = 1;
7540                 tc_size[i] = roundup_size;
7541                 tc_offset[i] = kinfo->rss_size * i;
7542         }
7543         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7544         if (ret)
7545                 return ret;
7546
7547         /* Reinitialize the RSS indirection table to match the new RSS size */
7548         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7549         if (!rss_indir)
7550                 return -ENOMEM;
7551
7552         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7553                 rss_indir[i] = i % kinfo->rss_size;
7554
7555         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7556         if (ret)
7557                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7558                         ret);
7559
7560         kfree(rss_indir);
7561
7562         if (!ret)
7563                 dev_info(&hdev->pdev->dev,
7564                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7565                          cur_rss_size, kinfo->rss_size,
7566                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
7567
7568         return ret;
7569 }
7570
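/* Ask the firmware how many 32-bit and 64-bit registers it can report;
 * the counts are used to size and fill the register dump.
 */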
7571 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7572                               u32 *regs_num_64_bit)
7573 {
7574         struct hclge_desc desc;
7575         u32 total_num;
7576         int ret;
7577
7578         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7579         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7580         if (ret) {
7581                 dev_err(&hdev->pdev->dev,
7582                         "Query register number cmd failed, ret = %d.\n", ret);
7583                 return ret;
7584         }
7585
7586         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
7587         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
7588
7589         total_num = *regs_num_32_bit + *regs_num_64_bit;
7590         if (!total_num)
7591                 return -EINVAL;
7592
7593         return 0;
7594 }
7595
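/* Read regs_num 32-bit register values from firmware.  The first
 * descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM - 2 values because its
 * leading words hold the command header; every following descriptor
 * carries HCLGE_32_BIT_REG_RTN_DATANUM, hence the "+ 2" in cmd_num.
 */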
7596 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7597                                  void *data)
7598 {
7599 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7600
7601         struct hclge_desc *desc;
7602         u32 *reg_val = data;
7603         __le32 *desc_data;
7604         int cmd_num;
7605         int i, k, n;
7606         int ret;
7607
7608         if (regs_num == 0)
7609                 return 0;
7610
7611         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7612         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7613         if (!desc)
7614                 return -ENOMEM;
7615
7616         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7617         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7618         if (ret) {
7619                 dev_err(&hdev->pdev->dev,
7620                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
7621                 kfree(desc);
7622                 return ret;
7623         }
7624
7625         for (i = 0; i < cmd_num; i++) {
7626                 if (i == 0) {
7627                         desc_data = (__le32 *)(&desc[i].data[0]);
7628                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7629                 } else {
7630                         desc_data = (__le32 *)(&desc[i]);
7631                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
7632                 }
7633                 for (k = 0; k < n; k++) {
7634                         *reg_val++ = le32_to_cpu(*desc_data++);
7635
7636                         regs_num--;
7637                         if (!regs_num)
7638                                 break;
7639                 }
7640         }
7641
7642         kfree(desc);
7643         return 0;
7644 }
7645
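/* Same scheme as the 32-bit query but for 64-bit registers: the first
 * descriptor carries HCLGE_64_BIT_REG_RTN_DATANUM - 1 values and each
 * following descriptor carries HCLGE_64_BIT_REG_RTN_DATANUM.
 */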
7646 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7647                                  void *data)
7648 {
7649 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7650
7651         struct hclge_desc *desc;
7652         u64 *reg_val = data;
7653         __le64 *desc_data;
7654         int cmd_num;
7655         int i, k, n;
7656         int ret;
7657
7658         if (regs_num == 0)
7659                 return 0;
7660
7661         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7662         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7663         if (!desc)
7664                 return -ENOMEM;
7665
7666         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7667         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7668         if (ret) {
7669                 dev_err(&hdev->pdev->dev,
7670                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
7671                 kfree(desc);
7672                 return ret;
7673         }
7674
7675         for (i = 0; i < cmd_num; i++) {
7676                 if (i == 0) {
7677                         desc_data = (__le64 *)(&desc[i].data[0]);
7678                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7679                 } else {
7680                         desc_data = (__le64 *)(&desc[i]);
7681                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
7682                 }
7683                 for (k = 0; k < n; k++) {
7684                         *reg_val++ = le64_to_cpu(*desc_data++);
7685
7686                         regs_num--;
7687                         if (!regs_num)
7688                                 break;
7689                 }
7690         }
7691
7692         kfree(desc);
7693         return 0;
7694 }
7695
7696 #define MAX_SEPARATE_NUM        4
7697 #define SEPARATOR_VALUE         0xFFFFFFFF
7698 #define REG_NUM_PER_LINE        4
7699 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
7700
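/* Buffer length needed for the register dump: each directly read block
 * (command queue, common, one per TQP ring, one per TQP interrupt vector)
 * occupies whole REG_LEN_PER_LINE lines, padded with separator values,
 * followed by the firmware-reported 32-bit and 64-bit register sets.
 */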
7701 static int hclge_get_regs_len(struct hnae3_handle *handle)
7702 {
7703         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
7704         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7705         struct hclge_vport *vport = hclge_get_vport(handle);
7706         struct hclge_dev *hdev = vport->back;
7707         u32 regs_num_32_bit, regs_num_64_bit;
7708         int ret;
7709
7710         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7711         if (ret) {
7712                 dev_err(&hdev->pdev->dev,
7713                         "Get register number failed, ret = %d.\n", ret);
7714                 return -EOPNOTSUPP;
7715         }
7716
7717         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
7718         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
7719         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
7720         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
7721
7722         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
7723                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
7724                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7725 }
7726
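/* Fill the register dump buffer.  Registers read directly from the PF
 * PCIe space come first: the command queue and common blocks, one ring
 * block per TQP (0x200 apart) and one interrupt block per TQP vector
 * (4 bytes apart), each padded with SEPARATOR_VALUE.  The 32-bit and
 * 64-bit registers queried from firmware are appended at the end.
 */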
7727 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7728                            void *data)
7729 {
7730         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7731         struct hclge_vport *vport = hclge_get_vport(handle);
7732         struct hclge_dev *hdev = vport->back;
7733         u32 regs_num_32_bit, regs_num_64_bit;
7734         int i, j, reg_um, separator_num;
7735         u32 *reg = data;
7736         int ret;
7737
7738         *version = hdev->fw_version;
7739
7740         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
7741         if (ret) {
7742                 dev_err(&hdev->pdev->dev,
7743                         "Get register number failed, ret = %d.\n", ret);
7744                 return;
7745         }
7746
7747         /* fetch per-PF register values from the PF PCIe register space */
7748         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
7749         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7750         for (i = 0; i < reg_um; i++)
7751                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
7752         for (i = 0; i < separator_num; i++)
7753                 *reg++ = SEPARATOR_VALUE;
7754
7755         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
7756         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7757         for (i = 0; i < reg_um; i++)
7758                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
7759         for (i = 0; i < separator_num; i++)
7760                 *reg++ = SEPARATOR_VALUE;
7761
7762         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
7763         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7764         for (j = 0; j < kinfo->num_tqps; j++) {
7765                 for (i = 0; i < reg_um; i++)
7766                         *reg++ = hclge_read_dev(&hdev->hw,
7767                                                 ring_reg_addr_list[i] +
7768                                                 0x200 * j);
7769                 for (i = 0; i < separator_num; i++)
7770                         *reg++ = SEPARATOR_VALUE;
7771         }
7772
7773         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
7774         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7775         for (j = 0; j < hdev->num_msi_used - 1; j++) {
7776                 for (i = 0; i < reg_um; i++)
7777                         *reg++ = hclge_read_dev(&hdev->hw,
7778                                                 tqp_intr_reg_addr_list[i] +
7779                                                 4 * j);
7780                 for (i = 0; i < separator_num; i++)
7781                         *reg++ = SEPARATOR_VALUE;
7782         }
7783
7784         /* fetch PF common register values from firmware */
7785         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
7786         if (ret) {
7787                 dev_err(&hdev->pdev->dev,
7788                         "Get 32 bit register failed, ret = %d.\n", ret);
7789                 return;
7790         }
7791
7792         reg += regs_num_32_bit;
7793         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
7794         if (ret)
7795                 dev_err(&hdev->pdev->dev,
7796                         "Get 64 bit register failed, ret = %d.\n", ret);
7797 }
7798
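/* Drive the locate LED through the LED_STATUS_CFG firmware command; used
 * by hclge_set_led_id() to implement port identification.
 */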
7799 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7800 {
7801         struct hclge_set_led_state_cmd *req;
7802         struct hclge_desc desc;
7803         int ret;
7804
7805         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7806
7807         req = (struct hclge_set_led_state_cmd *)desc.data;
7808         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7809                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7810
7811         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7812         if (ret)
7813                 dev_err(&hdev->pdev->dev,
7814                         "Send set led state cmd error, ret =%d\n", ret);
7815
7816         return ret;
7817 }
7818
7819 enum hclge_led_status {
7820         HCLGE_LED_OFF,
7821         HCLGE_LED_ON,
7822         HCLGE_LED_NO_CHANGE = 0xFF,
7823 };
7824
7825 static int hclge_set_led_id(struct hnae3_handle *handle,
7826                             enum ethtool_phys_id_state status)
7827 {
7828         struct hclge_vport *vport = hclge_get_vport(handle);
7829         struct hclge_dev *hdev = vport->back;
7830
7831         switch (status) {
7832         case ETHTOOL_ID_ACTIVE:
7833                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
7834         case ETHTOOL_ID_INACTIVE:
7835                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7836         default:
7837                 return -EINVAL;
7838         }
7839 }
7840
7841 static void hclge_get_link_mode(struct hnae3_handle *handle,
7842                                 unsigned long *supported,
7843                                 unsigned long *advertising)
7844 {
7845         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7846         struct hclge_vport *vport = hclge_get_vport(handle);
7847         struct hclge_dev *hdev = vport->back;
7848         unsigned int idx = 0;
7849
7850         for (; idx < size; idx++) {
7851                 supported[idx] = hdev->hw.mac.supported[idx];
7852                 advertising[idx] = hdev->hw.mac.advertising[idx];
7853         }
7854 }
7855
7856 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7857 {
7858         struct hclge_vport *vport = hclge_get_vport(handle);
7859         struct hclge_dev *hdev = vport->back;
7860
7861         return hclge_config_gro(hdev, enable);
7862 }
7863
7864 static const struct hnae3_ae_ops hclge_ops = {
7865         .init_ae_dev = hclge_init_ae_dev,
7866         .uninit_ae_dev = hclge_uninit_ae_dev,
7867         .flr_prepare = hclge_flr_prepare,
7868         .flr_done = hclge_flr_done,
7869         .init_client_instance = hclge_init_client_instance,
7870         .uninit_client_instance = hclge_uninit_client_instance,
7871         .map_ring_to_vector = hclge_map_ring_to_vector,
7872         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7873         .get_vector = hclge_get_vector,
7874         .put_vector = hclge_put_vector,
7875         .set_promisc_mode = hclge_set_promisc_mode,
7876         .set_loopback = hclge_set_loopback,
7877         .start = hclge_ae_start,
7878         .stop = hclge_ae_stop,
7879         .client_start = hclge_client_start,
7880         .client_stop = hclge_client_stop,
7881         .get_status = hclge_get_status,
7882         .get_ksettings_an_result = hclge_get_ksettings_an_result,
7883         .update_speed_duplex_h = hclge_update_speed_duplex_h,
7884         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
7885         .get_media_type = hclge_get_media_type,
7886         .get_rss_key_size = hclge_get_rss_key_size,
7887         .get_rss_indir_size = hclge_get_rss_indir_size,
7888         .get_rss = hclge_get_rss,
7889         .set_rss = hclge_set_rss,
7890         .set_rss_tuple = hclge_set_rss_tuple,
7891         .get_rss_tuple = hclge_get_rss_tuple,
7892         .get_tc_size = hclge_get_tc_size,
7893         .get_mac_addr = hclge_get_mac_addr,
7894         .set_mac_addr = hclge_set_mac_addr,
7895         .do_ioctl = hclge_do_ioctl,
7896         .add_uc_addr = hclge_add_uc_addr,
7897         .rm_uc_addr = hclge_rm_uc_addr,
7898         .add_mc_addr = hclge_add_mc_addr,
7899         .rm_mc_addr = hclge_rm_mc_addr,
7900         .set_autoneg = hclge_set_autoneg,
7901         .get_autoneg = hclge_get_autoneg,
7902         .get_pauseparam = hclge_get_pauseparam,
7903         .set_pauseparam = hclge_set_pauseparam,
7904         .set_mtu = hclge_set_mtu,
7905         .reset_queue = hclge_reset_tqp,
7906         .get_stats = hclge_get_stats,
7907         .update_stats = hclge_update_stats,
7908         .get_strings = hclge_get_strings,
7909         .get_sset_count = hclge_get_sset_count,
7910         .get_fw_version = hclge_get_fw_version,
7911         .get_mdix_mode = hclge_get_mdix_mode,
7912         .enable_vlan_filter = hclge_enable_vlan_filter,
7913         .set_vlan_filter = hclge_set_vlan_filter,
7914         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
7915         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
7916         .reset_event = hclge_reset_event,
7917         .set_default_reset_request = hclge_set_def_reset_request,
7918         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
7919         .set_channels = hclge_set_channels,
7920         .get_channels = hclge_get_channels,
7921         .get_regs_len = hclge_get_regs_len,
7922         .get_regs = hclge_get_regs,
7923         .set_led_id = hclge_set_led_id,
7924         .get_link_mode = hclge_get_link_mode,
7925         .add_fd_entry = hclge_add_fd_entry,
7926         .del_fd_entry = hclge_del_fd_entry,
7927         .del_all_fd_entries = hclge_del_all_fd_entries,
7928         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
7929         .get_fd_rule_info = hclge_get_fd_rule_info,
7930         .get_fd_all_rules = hclge_get_all_rules,
7931         .restore_fd_rules = hclge_restore_fd_entries,
7932         .enable_fd = hclge_enable_fd,
7933         .dbg_run_cmd = hclge_dbg_run_cmd,
7934         .process_hw_error = hclge_process_ras_hw_error,
7935         .get_hw_reset_stat = hclge_get_hw_reset_stat,
7936         .ae_dev_resetting = hclge_ae_dev_resetting,
7937         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
7938         .set_gro_en = hclge_gro_en,
7939 };
7940
7941 static struct hnae3_ae_algo ae_algo = {
7942         .ops = &hclge_ops,
7943         .pdev_id_table = ae_algo_pci_tbl,
7944 };
7945
7946 static int hclge_init(void)
7947 {
7948         pr_info("%s is initializing\n", HCLGE_NAME);
7949
7950         hnae3_register_ae_algo(&ae_algo);
7951
7952         return 0;
7953 }
7954
7955 static void hclge_exit(void)
7956 {
7957         hnae3_unregister_ae_algo(&ae_algo);
7958 }
7959 module_init(hclge_init);
7960 module_exit(hclge_exit);
7961
7962 MODULE_LICENSE("GPL");
7963 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
7964 MODULE_DESCRIPTION("HCLGE Driver");
7965 MODULE_VERSION(HCLGE_MOD_VERSION);