// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
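/* These two macros work as a pair: HCLGE_MAC_STATS_FIELD_OFF() records the
 * byte offset of a counter inside struct hclge_mac_stats, and
 * HCLGE_STATS_READ() reads a u64 back out by that offset (see
 * hclge_comm_get_stats() below). A minimal usage sketch, with a
 * hypothetical local "stats" variable:
 *
 *   u64 pause = HCLGE_STATS_READ(&stats,
 *                   HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */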

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
                               u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
                .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
                .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
                .i_port_bitmap = 0x1,
        },
};

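/* MAC statistics are fetched with one multi-descriptor firmware command.
 * Judging by the parsing loop below, the first descriptor contributes only
 * HCLGE_RTN_DATA_NUM - 2 = 2 u64 counters from its data area, while each
 * continuation descriptor is consumed whole (the firmware apparently
 * reuses its header area for data) as HCLGE_RTN_DATA_NUM = 4 counters,
 * giving up to 2 + 20 * 4 = 82 counters for HCLGE_MAC_CMD_NUM = 21
 * descriptors. Counters are accumulated into hdev->hw_stats.mac_stats in
 * field order.
 */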
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

        u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RTN_DATA_NUM - 2;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RTN_DATA_NUM;
                }
                for (k = 0; k < n; k++) {
                        *data++ += le64_to_cpu(*desc_data);
                        desc_data++;
                }
        }

        return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_RX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_TX_STATUS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
                                 struct net_device_stats *net_stats)
{
        net_stats->tx_dropped = 0;
        net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
        net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
        net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

        net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
        net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

        net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
        net_stats->rx_length_errors =
                hw_stats->mac_stats.mac_rx_undersize_pkt_num;
        net_stats->rx_length_errors +=
                hw_stats->mac_stats.mac_rx_oversize_pkt_num;
        net_stats->rx_over_errors =
                hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);

        hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        hclge_update_netstat(hw_stats, net_stats);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: supported only in GE mode
         * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
         * phy: supported only when a PHY device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->pdev->revision >= 0x21 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
                              u32 stringset,
                              u8 *data)
{
        u8 *p = data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset,
                                           g_mac_stats_string,
                                           size,
                                           p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
                                 g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string),
                                 data);
        p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n",
                                ret);

                        return ret;
                }

                /* Check if pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < 5);

        ret = hclge_parse_func_status(hdev, req);

        return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = __le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* The PF owns both NIC and RoCE vectors; the NIC vectors
                 * are laid out before the RoCE vectors.
                 */
                hdev->num_msi = hdev->num_roce_msi +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }

        return 0;
}

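/* Firmware speed codes, as implied by the mapping below (note they are
 * not in ascending speed order): 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G,
 * 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */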
static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                        supported);

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                        supported);

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                        supported);

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                        supported);

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                        supported);

        set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
        set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type != HNAE3_MEDIA_TYPE_FIBER)
                return;

        hclge_parse_fiber_link_mode(hdev, speed_ability);
}

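/* The static configuration is returned in two descriptors whose param[]
 * words are unpacked below. One subtlety: the MAC address is split across
 * param words, and the high half is merged with
 * "(mac_addr_tmp_high << 31) << 1" rather than "<< 32". Since
 * mac_addr_tmp is u64, a plain "<< 32" would behave the same here; the
 * two-step shift appears to be a defensive idiom against an accidental
 * shift by the full width of a 32-bit operand.
 */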
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get MAC address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_RSS_SIZE_M,
                                            HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param_cmd *req;
        int i, ret;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
                u32 offset = 0;

                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
                hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                                HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* The length must be in units of 4 bytes when sent to
                 * hardware.
                 */
                hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
                                HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);
        }

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
                return ret;
        }

        hclge_parse_cfg(hcfg, desc);

        return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_query_function_status(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query function status error %d.\n", ret);
                return ret;
        }

        /* get pf resource */
        ret = hclge_query_pf_resource(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

        return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
        struct hclge_cfg cfg;
        int ret, i;

        ret = hclge_get_cfg(hdev, &cfg);
        if (ret) {
                dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
                return ret;
        }

        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
        hdev->rss_size_max = cfg.rss_size_max;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
        hdev->hw.mac.phy_addr = cfg.phy_addr;
        hdev->num_desc = cfg.tqp_desc_num;
        hdev->tm_info.num_pg = 1;
        hdev->tc_max = cfg.tc_num;
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;

        ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
        if (ret) {
                dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
                return ret;
        }

        hclge_parse_link_mode(hdev, cfg.speed_ability);

        if ((hdev->tc_max > HNAE3_MAX_TC) ||
            (hdev->tc_max < 1)) {
                dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
                         hdev->tc_max);
                hdev->tc_max = 1;
        }

        /* Dev does not support DCB */
        if (!hnae3_dev_dcb_supported(hdev)) {
                hdev->tc_max = 1;
                hdev->pfc_max = 0;
        } else {
                hdev->pfc_max = hdev->tc_max;
        }

        hdev->tm_info.num_tc = hdev->tc_max;

        /* Non-contiguous TCs are currently not supported */
        for (i = 0; i < hdev->tm_info.num_tc; i++)
                hnae3_set_bit(hdev->hw_tc_map, i, 1);

        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

        return ret;
}

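/* Both the min and max MSS values are packed with the
 * HCLGE_TSO_MSS_MIN_M/_S mask below. That looks like a copy/paste slip,
 * but each value lands in its own 16-bit field (tso_mss_min /
 * tso_mss_max), and the MSS presumably occupies the same bit range in
 * both fields, so reusing the MIN mask is harmless as long as that
 * assumption about the command layout holds.
 */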
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
                            int tso_mss_max)
{
        struct hclge_cfg_tso_status_cmd *req;
        struct hclge_desc desc;
        u16 tso_mss;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hclge_cfg_tso_status_cmd *)desc.data;

        tso_mss = 0;
        hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                        HCLGE_TSO_MSS_MIN_S, tso_mss_min);
        req->tso_mss_min = cpu_to_le16(tso_mss);

        tso_mss = 0;
        hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
                        HCLGE_TSO_MSS_MIN_S, tso_mss_max);
        req->tso_mss_max = cpu_to_le16(tso_mss);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;
        int ret;

        if (!hnae3_dev_gro_supported(hdev))
                return 0;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;

        req->gro_en = cpu_to_le16(en ? 1 : 0);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "GRO hardware config cmd failed, ret = %d\n", ret);

        return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
        struct hclge_tqp *tqp;
        int i;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclge_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algo;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.desc_num = hdev->num_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
                        i * HCLGE_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

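/* Maps one physical queue pair to a function. tqp_flag packs two bits:
 * HCLGE_TQP_MAP_TYPE_B selects a PF (0) vs VF (1) mapping, and
 * HCLGE_TQP_MAP_EN_B enables the mapping; the bit meanings here are
 * taken at face value from how they are set below.
 */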
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
                                  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
        struct hclge_tqp_map_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

        req = (struct hclge_tqp_map_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(tqp_pid);
        req->tqp_vf = func_id;
        req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
                        1 << HCLGE_TQP_MAP_EN_B;
        req->tqp_vid = cpu_to_le16(tqp_vid);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

        return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int i, alloced;

        for (i = 0, alloced = 0; i < hdev->num_tqps &&
             alloced < kinfo->num_tqps; i++) {
                if (!hdev->htqp[i].alloced) {
                        hdev->htqp[i].q.handle = &vport->nic;
                        hdev->htqp[i].q.tqp_index = alloced;
                        hdev->htqp[i].q.desc_num = kinfo->num_desc;
                        kinfo->tqp[alloced] = &hdev->htqp[i].q;
                        hdev->htqp[i].alloced = true;
                        alloced++;
                }
        }
        vport->alloc_tqps = kinfo->num_tqps;

        return 0;
}

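/* Queue layout for the kernel NIC: the tqps handed to this vport are
 * split evenly across the enabled TCs, so
 * rss_size = min(rss_size_max, num_tqps / num_tc) and
 * num_tqps = rss_size * num_tc. For example (numbers are illustrative
 * only), 16 tqps with 4 TCs and rss_size_max = 8 gives rss_size = 4 and
 * 16 usable tqps, laid out as 4 consecutive queues per TC.
 */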
static int hclge_knic_setup(struct hclge_vport *vport,
                            u16 num_tqps, u16 num_desc)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
        struct hclge_dev *hdev = vport->back;
        int i, ret;

        kinfo->num_desc = num_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
        kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i)) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        ret = hclge_assign_tqp(vport);
        if (ret)
                dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

        return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
                                  struct hclge_vport *vport)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hnae3_knic_private_info *kinfo;
        u16 i;

        kinfo = &nic->kinfo;
        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *q =
                        container_of(kinfo->tqp[i], struct hclge_tqp, q);
                bool is_pf;
                int ret;

                is_pf = !(vport->vport_id);
                ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
                                             i, is_pf);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u16 i, num_vport;

        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
        for (i = 0; i < num_vport; i++) {
                int ret;

                ret = hclge_map_tqp_to_vport(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
        /* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
        struct hnae3_handle *nic = &vport->nic;
        struct hclge_dev *hdev = vport->back;
        int ret;

        nic->pdev = hdev->pdev;
        nic->ae_algo = &ae_algo;
        nic->numa_node_mask = hdev->numa_node_mask;

        if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
                ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
                if (ret) {
                        dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
                                ret);
                        return ret;
                }
        } else {
                hclge_unic_setup(vport, num_tqps);
        }

        return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclge_vport *vport;
        u32 tqp_main_vport;
        u32 tqp_per_vport;
        int num_vport, i;
        int ret;

        /* We need to alloc a vport for the main NIC of the PF */
        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

        if (hdev->num_tqps < num_vport) {
                dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
                        hdev->num_tqps, num_vport);
                return -EINVAL;
        }

        /* Alloc the same number of TQPs for every vport */
        tqp_per_vport = hdev->num_tqps / num_vport;
        tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

        vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
                             GFP_KERNEL);
        if (!vport)
                return -ENOMEM;

        hdev->vport = vport;
        hdev->num_alloc_vport = num_vport;

        if (IS_ENABLED(CONFIG_PCI_IOV))
                hdev->num_alloc_vfs = hdev->num_req_vfs;

        for (i = 0; i < num_vport; i++) {
                vport->back = hdev;
                vport->vport_id = i;
                vport->mps = HCLGE_MAC_DEFAULT_FRAME;

                if (i == 0)
                        ret = hclge_vport_setup(vport, tqp_main_vport);
                else
                        ret = hclge_vport_setup(vport, tqp_per_vport);
                if (ret) {
                        dev_err(&pdev->dev,
                                "vport setup failed for vport %d, %d\n",
                                i, ret);
                        return ret;
                }

                vport++;
        }

        return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT       7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
        struct hclge_tx_buff_alloc_cmd *req;
        struct hclge_desc desc;
        int ret;
        u8 i;

        req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
        for (i = 0; i < HCLGE_TC_NUM; i++) {
                u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

                req->tx_pkt_buff[i] =
                        cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
                                     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
        }

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
                        ret);

        return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
                                 struct hclge_pkt_buf_alloc *buf_alloc)
{
        int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

        if (ret)
                dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

        return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
        int i, cnt = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))
                        cnt++;
        return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
        int i, cnt = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i) &&
                    hdev->tm_info.hw_pfc_map & BIT(i))
                        cnt++;
        return cnt;
}

/* Get the number of pfc-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
                                  struct hclge_pkt_buf_alloc *buf_alloc)
{
        struct hclge_priv_buf *priv;
        int i, cnt = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
                if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
                    priv->enable)
                        cnt++;
        }

        return cnt;
}

/* Get the number of pfc-disabled TCs that have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
                                     struct hclge_pkt_buf_alloc *buf_alloc)
{
        struct hclge_priv_buf *priv;
        int i, cnt = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
                if (hdev->hw_tc_map & BIT(i) &&
                    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
                    priv->enable)
                        cnt++;
        }

        return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
        struct hclge_priv_buf *priv;
        u32 rx_priv = 0;
        int i;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
                if (priv->enable)
                        rx_priv += priv->buf_size;
        }
        return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
        u32 i, total_tx_size = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

        return total_tx_size;
}

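/* Checks whether rx_all can hold every private buffer plus a shared
 * buffer sized for the worst case. The shared standard is
 * max(shared_buf_min, shared_buf_tc); for example (illustrative numbers
 * only), with mps = 256, 4 enabled TCs and 1 PFC-enabled TC,
 * shared_buf_tc = 1 * 256 + 3 * 256 / 2 + 256 = 896 bytes. Whatever is
 * left after the private buffers becomes the shared buffer, with
 * per-TC high/low thresholds filled in below.
 */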
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
                               struct hclge_pkt_buf_alloc *buf_alloc,
                               u32 rx_all)
{
        u32 shared_buf_min, shared_buf_tc, shared_std;
        int tc_num, pfc_enable_num;
        u32 shared_buf;
        u32 rx_priv;
        int i;

        tc_num = hclge_get_tc_num(hdev);
        pfc_enable_num = hclge_get_pfc_enable_num(hdev);

        if (hnae3_dev_dcb_supported(hdev))
                shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
        else
                shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

        shared_buf_tc = pfc_enable_num * hdev->mps +
                        (tc_num - pfc_enable_num) * hdev->mps / 2 +
                        hdev->mps;
        shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

        rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
        if (rx_all <= rx_priv + shared_std)
                return false;

        shared_buf = rx_all - rx_priv;
        buf_alloc->s_buf.buf_size = shared_buf;
        buf_alloc->s_buf.self.high = shared_buf;
        buf_alloc->s_buf.self.low = 2 * hdev->mps;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                if ((hdev->hw_tc_map & BIT(i)) &&
                    (hdev->tm_info.hw_pfc_map & BIT(i))) {
                        buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
                        buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
                } else {
                        buf_alloc->s_buf.tc_thrd[i].low = 0;
                        buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
                }
        }

        return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
                                struct hclge_pkt_buf_alloc *buf_alloc)
{
        u32 i, total_size;

        total_size = hdev->pkt_buf_size;

        /* alloc tx buffer for all enabled tc */
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

                if (total_size < HCLGE_DEFAULT_TX_BUF)
                        return -ENOMEM;

                if (hdev->hw_tc_map & BIT(i))
                        priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
                else
                        priv->tx_buf_size = 0;

                total_size -= priv->tx_buf_size;
        }

        return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on successful calculation, negative on failure
 */
1385 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1386                                 struct hclge_pkt_buf_alloc *buf_alloc)
1387 {
1388 #define HCLGE_BUF_SIZE_UNIT     128
1389         u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1390         int no_pfc_priv_num, pfc_priv_num;
1391         struct hclge_priv_buf *priv;
1392         int i;
1393
1394         aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1395         rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1396
1397         /* When DCB is not supported, rx private
1398          * buffer is not allocated.
1399          */
1400         if (!hnae3_dev_dcb_supported(hdev)) {
1401                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1402                         return -ENOMEM;
1403
1404                 return 0;
1405         }
1406
1407         /* step 1, try to alloc private buffer for all enabled tc */
1408         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1409                 priv = &buf_alloc->priv_buf[i];
1410                 if (hdev->hw_tc_map & BIT(i)) {
1411                         priv->enable = 1;
1412                         if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1413                                 priv->wl.low = aligned_mps;
1414                                 priv->wl.high = priv->wl.low + aligned_mps;
1415                                 priv->buf_size = priv->wl.high +
1416                                                 HCLGE_DEFAULT_DV;
1417                         } else {
1418                                 priv->wl.low = 0;
1419                                 priv->wl.high = 2 * aligned_mps;
1420                                 priv->buf_size = priv->wl.high;
1421                         }
1422                 } else {
1423                         priv->enable = 0;
1424                         priv->wl.low = 0;
1425                         priv->wl.high = 0;
1426                         priv->buf_size = 0;
1427                 }
1428         }
1429
1430         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1431                 return 0;
1432
1433         /* step 2, try to decrease the private buffer size of
1434          * TCs with pfc disabled
1435          */
1436         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1437                 priv = &buf_alloc->priv_buf[i];
1438
1439                 priv->enable = 0;
1440                 priv->wl.low = 0;
1441                 priv->wl.high = 0;
1442                 priv->buf_size = 0;
1443
1444                 if (!(hdev->hw_tc_map & BIT(i)))
1445                         continue;
1446
1447                 priv->enable = 1;
1448
1449                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1450                         priv->wl.low = HCLGE_BUF_SIZE_UNIT;
1451                         priv->wl.high = priv->wl.low + aligned_mps;
1452                         priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1453                 } else {
1454                         priv->wl.low = 0;
1455                         priv->wl.high = aligned_mps;
1456                         priv->buf_size = priv->wl.high;
1457                 }
1458         }
1459
1460         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1461                 return 0;
1462
1463         /* step 3, try to reduce the number of pfc disabled TCs
1464          * which still have a private buffer
1465          */
1466         /* get the number of pfc disabled TCs which still have a private buffer */
1467         no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1468
1469         /* clear from the last TC first */
1470         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1471                 priv = &buf_alloc->priv_buf[i];
1472
1473                 if (hdev->hw_tc_map & BIT(i) &&
1474                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1475                         /* Clear this pfc disabled TC's private buffer */
1476                         priv->wl.low = 0;
1477                         priv->wl.high = 0;
1478                         priv->buf_size = 0;
1479                         priv->enable = 0;
1480                         no_pfc_priv_num--;
1481                 }
1482
1483                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1484                     no_pfc_priv_num == 0)
1485                         break;
1486         }
1487
1488         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1489                 return 0;
1490
1491         /* step 4, try to reduce the number of pfc enabled TCs
1492          * which have private buffer.
1493          */
1494         pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1495
1496         /* clear from the last TC first */
1497         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1498                 priv = &buf_alloc->priv_buf[i];
1499
1500                 if (hdev->hw_tc_map & BIT(i) &&
1501                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1502                         /* Clear this pfc enabled TC's private buffer */
1503                         priv->wl.low = 0;
1504                         priv->enable = 0;
1505                         priv->wl.high = 0;
1506                         priv->buf_size = 0;
1507                         pfc_priv_num--;
1508                 }
1509
1510                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1511                     pfc_priv_num == 0)
1512                         break;
1513         }
1514         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1515                 return 0;
1516
1517         return -ENOMEM;
1518 }
1519
1520 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1521                                    struct hclge_pkt_buf_alloc *buf_alloc)
1522 {
1523         struct hclge_rx_priv_buff_cmd *req;
1524         struct hclge_desc desc;
1525         int ret;
1526         int i;
1527
1528         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1529         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1530
1531         /* Alloc private buffer TCs */
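        /* Buffer sizes are programmed in hardware buffer units (bytes
         * shifted right by HCLGE_BUF_UNIT_S, presumably 128-byte units
         * matching HCLGE_BUF_SIZE_UNIT), with the per-TC enable bit
         * (HCLGE_TC0_PRI_BUF_EN_B) set alongside.
         */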
1532         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1533                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1534
1535                 req->buf_num[i] =
1536                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1537                 req->buf_num[i] |=
1538                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1539         }
1540
1541         req->shared_buf =
1542                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1543                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1544
1545         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1546         if (ret)
1547                 dev_err(&hdev->pdev->dev,
1548                         "rx private buffer alloc cmd failed %d\n", ret);
1549
1550         return ret;
1551 }
1552
1553 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1554                                    struct hclge_pkt_buf_alloc *buf_alloc)
1555 {
1556         struct hclge_rx_priv_wl_buf *req;
1557         struct hclge_priv_buf *priv;
1558         struct hclge_desc desc[2];
1559         int i, j;
1560         int ret;
1561
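        /* The per-TC waterlines are spread across two command
         * descriptors, HCLGE_TC_NUM_ONE_DESC TCs per descriptor,
         * chained together via the NEXT flag on the first one.
         */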
1562         for (i = 0; i < 2; i++) {
1563                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1564                                            false);
1565                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1566
1567                 /* The first descriptor sets the NEXT bit to 1 */
1568                 if (i == 0)
1569                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1570                 else
1571                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1572
1573                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1574                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1575
1576                         priv = &buf_alloc->priv_buf[idx];
1577                         req->tc_wl[j].high =
1578                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1579                         req->tc_wl[j].high |=
1580                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1581                         req->tc_wl[j].low =
1582                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1583                         req->tc_wl[j].low |=
1584                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1585                 }
1586         }
1587
1588         /* Send 2 descriptors at one time */
1589         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev,
1592                         "rx private waterline config cmd failed %d\n",
1593                         ret);
1594         return ret;
1595 }
1596
1597 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1598                                     struct hclge_pkt_buf_alloc *buf_alloc)
1599 {
1600         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1601         struct hclge_rx_com_thrd *req;
1602         struct hclge_desc desc[2];
1603         struct hclge_tc_thrd *tc;
1604         int i, j;
1605         int ret;
1606
1607         for (i = 0; i < 2; i++) {
1608                 hclge_cmd_setup_basic_desc(&desc[i],
1609                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1610                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1611
1612                 /* The first descriptor sets the NEXT bit to 1 */
1613                 if (i == 0)
1614                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1615                 else
1616                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1617
1618                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1619                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1620
1621                         req->com_thrd[j].high =
1622                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1623                         req->com_thrd[j].high |=
1624                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1625                         req->com_thrd[j].low =
1626                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1627                         req->com_thrd[j].low |=
1628                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1629                 }
1630         }
1631
1632         /* Send 2 descriptors at one time */
1633         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1634         if (ret)
1635                 dev_err(&hdev->pdev->dev,
1636                         "common threshold config cmd failed %d\n", ret);
1637         return ret;
1638 }
1639
1640 static int hclge_common_wl_config(struct hclge_dev *hdev,
1641                                   struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1644         struct hclge_rx_com_wl *req;
1645         struct hclge_desc desc;
1646         int ret;
1647
1648         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1649
1650         req = (struct hclge_rx_com_wl *)desc.data;
1651         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1652         req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1653
1654         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1655         req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1656
1657         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1658         if (ret)
1659                 dev_err(&hdev->pdev->dev,
1660                         "common waterline config cmd failed %d\n", ret);
1661
1662         return ret;
1663 }
1664
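/* hclge_buffer_alloc: calculate and assign the tx/rx packet buffers,
 * then program the per-TC waterlines and shared thresholds into
 * hardware (the waterline/threshold setup only applies when DCB is
 * supported).
 */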
1665 int hclge_buffer_alloc(struct hclge_dev *hdev)
1666 {
1667         struct hclge_pkt_buf_alloc *pkt_buf;
1668         int ret;
1669
1670         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1671         if (!pkt_buf)
1672                 return -ENOMEM;
1673
1674         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1675         if (ret) {
1676                 dev_err(&hdev->pdev->dev,
1677                         "could not calc tx buffer size for all TCs %d\n", ret);
1678                 goto out;
1679         }
1680
1681         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1682         if (ret) {
1683                 dev_err(&hdev->pdev->dev,
1684                         "could not alloc tx buffers %d\n", ret);
1685                 goto out;
1686         }
1687
1688         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1689         if (ret) {
1690                 dev_err(&hdev->pdev->dev,
1691                         "could not calc rx priv buffer size for all TCs %d\n",
1692                         ret);
1693                 goto out;
1694         }
1695
1696         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1697         if (ret) {
1698                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1699                         ret);
1700                 goto out;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1705                 if (ret) {
1706                         dev_err(&hdev->pdev->dev,
1707                                 "could not configure rx private waterline %d\n",
1708                                 ret);
1709                         goto out;
1710                 }
1711
1712                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1713                 if (ret) {
1714                         dev_err(&hdev->pdev->dev,
1715                                 "could not configure common threshold %d\n",
1716                                 ret);
1717                         goto out;
1718                 }
1719         }
1720
1721         ret = hclge_common_wl_config(hdev, pkt_buf);
1722         if (ret)
1723                 dev_err(&hdev->pdev->dev,
1724                         "could not configure common waterline %d\n", ret);
1725
1726 out:
1727         kfree(pkt_buf);
1728         return ret;
1729 }
1730
1731 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1732 {
1733         struct hnae3_handle *roce = &vport->roce;
1734         struct hnae3_handle *nic = &vport->nic;
1735
1736         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1737
1738         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1739             vport->back->num_msi_left == 0)
1740                 return -EINVAL;
1741
1742         roce->rinfo.base_vector = vport->back->roce_base_vector;
1743
1744         roce->rinfo.netdev = nic->kinfo.netdev;
1745         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1746
1747         roce->pdev = nic->pdev;
1748         roce->ae_algo = nic->ae_algo;
1749         roce->numa_node_mask = nic->numa_node_mask;
1750
1751         return 0;
1752 }
1753
1754 static int hclge_init_msi(struct hclge_dev *hdev)
1755 {
1756         struct pci_dev *pdev = hdev->pdev;
1757         int vectors;
1758         int i;
1759
1760         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1761                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1762         if (vectors < 0) {
1763                 dev_err(&pdev->dev,
1764                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1765                         vectors);
1766                 return vectors;
1767         }
1768         if (vectors < hdev->num_msi)
1769                 dev_warn(&hdev->pdev->dev,
1770                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1771                          hdev->num_msi, vectors);
1772
1773         hdev->num_msi = vectors;
1774         hdev->num_msi_left = vectors;
1775         hdev->base_msi_vector = pdev->irq;
1776         hdev->roce_base_vector = hdev->base_msi_vector +
1777                                 hdev->roce_base_msix_offset;
1778
1779         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1780                                            sizeof(u16), GFP_KERNEL);
1781         if (!hdev->vector_status) {
1782                 pci_free_irq_vectors(pdev);
1783                 return -ENOMEM;
1784         }
1785
1786         for (i = 0; i < hdev->num_msi; i++)
1787                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1788
1789         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1790                                         sizeof(int), GFP_KERNEL);
1791         if (!hdev->vector_irq) {
1792                 pci_free_irq_vectors(pdev);
1793                 return -ENOMEM;
1794         }
1795
1796         return 0;
1797 }
1798
1799 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1800 {
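        /* Only 10M and 100M links support half duplex; force full
         * duplex for all other speeds.
         */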
1802         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1803                 duplex = HCLGE_MAC_FULL;
1804
1805         return duplex;
1806 }
1807
1808 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1809                                       u8 duplex)
1810 {
1811         struct hclge_config_mac_speed_dup_cmd *req;
1812         struct hclge_desc desc;
1813         int ret;
1814
1815         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1816
1817         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1818
1819         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1820
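        /* Map the speed to the hardware speed field encoding used
         * below: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6,
         * 100M=7.
         */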
1821         switch (speed) {
1822         case HCLGE_MAC_SPEED_10M:
1823                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1824                                 HCLGE_CFG_SPEED_S, 6);
1825                 break;
1826         case HCLGE_MAC_SPEED_100M:
1827                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1828                                 HCLGE_CFG_SPEED_S, 7);
1829                 break;
1830         case HCLGE_MAC_SPEED_1G:
1831                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1832                                 HCLGE_CFG_SPEED_S, 0);
1833                 break;
1834         case HCLGE_MAC_SPEED_10G:
1835                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1836                                 HCLGE_CFG_SPEED_S, 1);
1837                 break;
1838         case HCLGE_MAC_SPEED_25G:
1839                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1840                                 HCLGE_CFG_SPEED_S, 2);
1841                 break;
1842         case HCLGE_MAC_SPEED_40G:
1843                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1844                                 HCLGE_CFG_SPEED_S, 3);
1845                 break;
1846         case HCLGE_MAC_SPEED_50G:
1847                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1848                                 HCLGE_CFG_SPEED_S, 4);
1849                 break;
1850         case HCLGE_MAC_SPEED_100G:
1851                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1852                                 HCLGE_CFG_SPEED_S, 5);
1853                 break;
1854         default:
1855                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1856                 return -EINVAL;
1857         }
1858
1859         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1860                       1);
1861
1862         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1863         if (ret) {
1864                 dev_err(&hdev->pdev->dev,
1865                         "mac speed/duplex config cmd failed %d.\n", ret);
1866                 return ret;
1867         }
1868
1869         return 0;
1870 }
1871
1872 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1873 {
1874         int ret;
1875
1876         duplex = hclge_check_speed_dup(duplex, speed);
1877         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1878                 return 0;
1879
1880         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1881         if (ret)
1882                 return ret;
1883
1884         hdev->hw.mac.speed = speed;
1885         hdev->hw.mac.duplex = duplex;
1886
1887         return 0;
1888 }
1889
1890 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1891                                      u8 duplex)
1892 {
1893         struct hclge_vport *vport = hclge_get_vport(handle);
1894         struct hclge_dev *hdev = vport->back;
1895
1896         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1897 }
1898
1899 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
1900 {
1901         struct hclge_config_auto_neg_cmd *req;
1902         struct hclge_desc desc;
1903         u32 flag = 0;
1904         int ret;
1905
1906         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
1907
1908         req = (struct hclge_config_auto_neg_cmd *)desc.data;
1909         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
1910         req->cfg_an_cmd_flag = cpu_to_le32(flag);
1911
1912         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1913         if (ret)
1914                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
1915                         ret);
1916
1917         return ret;
1918 }
1919
1920 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
1921 {
1922         struct hclge_vport *vport = hclge_get_vport(handle);
1923         struct hclge_dev *hdev = vport->back;
1924
1925         return hclge_set_autoneg_en(hdev, enable);
1926 }
1927
1928 static int hclge_get_autoneg(struct hnae3_handle *handle)
1929 {
1930         struct hclge_vport *vport = hclge_get_vport(handle);
1931         struct hclge_dev *hdev = vport->back;
1932         struct phy_device *phydev = hdev->hw.mac.phydev;
1933
1934         if (phydev)
1935                 return phydev->autoneg;
1936
1937         return hdev->hw.mac.autoneg;
1938 }
1939
1940 static int hclge_mac_init(struct hclge_dev *hdev)
1941 {
1942         struct hclge_mac *mac = &hdev->hw.mac;
1943         int ret;
1944
1945         hdev->support_sfp_query = true;
1946         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
1947         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
1948                                          hdev->hw.mac.duplex);
1949         if (ret) {
1950                 dev_err(&hdev->pdev->dev,
1951                         "Config mac speed dup fail ret=%d\n", ret);
1952                 return ret;
1953         }
1954
1955         mac->link = 0;
1956
1957         ret = hclge_set_mac_mtu(hdev, hdev->mps);
1958         if (ret) {
1959                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
1960                 return ret;
1961         }
1962
1963         ret = hclge_buffer_alloc(hdev);
1964         if (ret)
1965                 dev_err(&hdev->pdev->dev,
1966                         "allocate buffer fail, ret=%d\n", ret);
1967
1968         return ret;
1969 }
1970
1971 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
1972 {
1973         if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
1974                 schedule_work(&hdev->mbx_service_task);
1975 }
1976
1977 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
1978 {
1979         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
1980                 schedule_work(&hdev->rst_service_task);
1981 }
1982
1983 static void hclge_task_schedule(struct hclge_dev *hdev)
1984 {
1985         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
1986             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
1987             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
1988                 (void)schedule_work(&hdev->service_task);
1989 }
1990
1991 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
1992 {
1993         struct hclge_link_status_cmd *req;
1994         struct hclge_desc desc;
1995         int link_status;
1996         int ret;
1997
1998         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
1999         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2000         if (ret) {
2001                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2002                         ret);
2003                 return ret;
2004         }
2005
2006         req = (struct hclge_link_status_cmd *)desc.data;
2007         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2008
2009         return !!link_status;
2010 }
2011
2012 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2013 {
2014         int mac_state;
2015         int link_stat;
2016
2017         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2018                 return 0;
2019
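        /* the link is reported up only when the MAC reports up and,
         * if a PHY is attached, the PHY is running and also reports
         * link up
         */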
2020         mac_state = hclge_get_mac_link_status(hdev);
2021
2022         if (hdev->hw.mac.phydev) {
2023                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2024                         link_stat = mac_state &
2025                                 hdev->hw.mac.phydev->link;
2026                 else
2027                         link_stat = 0;
2028
2029         } else {
2030                 link_stat = mac_state;
2031         }
2032
2033         return !!link_stat;
2034 }
2035
2036 static void hclge_update_link_status(struct hclge_dev *hdev)
2037 {
2038         struct hnae3_client *client = hdev->nic_client;
2039         struct hnae3_handle *handle;
2040         int state;
2041         int i;
2042
2043         if (!client)
2044                 return;
2045         state = hclge_get_mac_phy_link(hdev);
2046         if (state != hdev->hw.mac.link) {
2047                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2048                         handle = &hdev->vport[i].nic;
2049                         client->ops->link_status_change(handle, state);
2050                 }
2051                 hdev->hw.mac.link = state;
2052         }
2053 }
2054
2055 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2056 {
2057         struct hclge_sfp_speed_cmd *resp = NULL;
2058         struct hclge_desc desc;
2059         int ret;
2060
2061         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2062         resp = (struct hclge_sfp_speed_cmd *)desc.data;
2063         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2064         if (ret == -EOPNOTSUPP) {
2065                 dev_warn(&hdev->pdev->dev,
2066                          "IMP does not support getting SFP speed %d\n", ret);
2067                 return ret;
2068         } else if (ret) {
2069                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2070                 return ret;
2071         }
2072
2073         *speed = resp->sfp_speed;
2074
2075         return 0;
2076 }
2077
2078 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2079 {
2080         struct hclge_mac mac = hdev->hw.mac;
2081         int speed;
2082         int ret;
2083
2084         /* get the speed from SFP cmd when phy
2085          * doesn't exist.
2086          */
2087         if (mac.phydev)
2088                 return 0;
2089
2090         /* if IMP does not support getting SFP/qSFP speed, return directly */
2091         if (!hdev->support_sfp_query)
2092                 return 0;
2093
2094         ret = hclge_get_sfp_speed(hdev, &speed);
2095         if (ret == -EOPNOTSUPP) {
2096                 hdev->support_sfp_query = false;
2097                 return ret;
2098         } else if (ret) {
2099                 return ret;
2100         }
2101
2102         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2103                 return 0; /* do nothing if no SFP */
2104
2105         /* must config full duplex for SFP */
2106         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2107 }
2108
2109 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2110 {
2111         struct hclge_vport *vport = hclge_get_vport(handle);
2112         struct hclge_dev *hdev = vport->back;
2113
2114         return hclge_update_speed_duplex(hdev);
2115 }
2116
2117 static int hclge_get_status(struct hnae3_handle *handle)
2118 {
2119         struct hclge_vport *vport = hclge_get_vport(handle);
2120         struct hclge_dev *hdev = vport->back;
2121
2122         hclge_update_link_status(hdev);
2123
2124         return hdev->hw.mac.link;
2125 }
2126
2127 static void hclge_service_timer(struct timer_list *t)
2128 {
2129         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2130
2131         mod_timer(&hdev->service_timer, jiffies + HZ);
2132         hdev->hw_stats.stats_timer++;
2133         hclge_task_schedule(hdev);
2134 }
2135
2136 static void hclge_service_complete(struct hclge_dev *hdev)
2137 {
2138         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2139
2140         /* Flush memory before next watchdog */
2141         smp_mb__before_atomic();
2142         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2143 }
2144
2145 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2146 {
2147         u32 rst_src_reg;
2148         u32 cmdq_src_reg;
2149
2150         /* fetch the events from their corresponding regs */
2151         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2152         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2153
2154         /* Assumption: If by any chance reset and mailbox events are reported
2155          * together then we will only process reset event in this go and will
2156          * defer the processing of the mailbox events. Since we would not have
2157          * cleared the RX CMDQ event this time, we would receive another
2158          * interrupt from H/W just for the mailbox.
2159          */
2160
2161         /* check for vector0 reset event sources */
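        /* reset sources are checked in decreasing order of severity:
         * IMP reset first, then global reset, then core reset
         */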
2162         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2163                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2164                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2165                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2166                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2167                 return HCLGE_VECTOR0_EVENT_RST;
2168         }
2169
2170         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2171                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2172                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2173                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2174                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2175                 return HCLGE_VECTOR0_EVENT_RST;
2176         }
2177
2178         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2179                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2180                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2181                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2182                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2183                 return HCLGE_VECTOR0_EVENT_RST;
2184         }
2185
2186         /* check for vector0 mailbox(=CMDQ RX) event source */
2187         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2188                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2189                 *clearval = cmdq_src_reg;
2190                 return HCLGE_VECTOR0_EVENT_MBX;
2191         }
2192
2193         return HCLGE_VECTOR0_EVENT_OTHER;
2194 }
2195
2196 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2197                                     u32 regclr)
2198 {
2199         switch (event_type) {
2200         case HCLGE_VECTOR0_EVENT_RST:
2201                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2202                 break;
2203         case HCLGE_VECTOR0_EVENT_MBX:
2204                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2205                 break;
2206         default:
2207                 break;
2208         }
2209 }
2210
2211 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2212 {
2213         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2214                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2215                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2216                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2217         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2218 }
2219
2220 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2221 {
2222         writel(enable ? 1 : 0, vector->addr);
2223 }
2224
2225 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2226 {
2227         struct hclge_dev *hdev = data;
2228         u32 event_cause;
2229         u32 clearval;
2230
2231         hclge_enable_vector(&hdev->misc_vector, false);
2232         event_cause = hclge_check_event_cause(hdev, &clearval);
2233
2234         /* vector 0 interrupt is shared with reset and mailbox source events. */
2235         switch (event_cause) {
2236         case HCLGE_VECTOR0_EVENT_RST:
2237                 hclge_reset_task_schedule(hdev);
2238                 break;
2239         case HCLGE_VECTOR0_EVENT_MBX:
2240                 /* If we are here then,
2241                  * 1. Either we are not handling any mbx task and we are not
2242                  *    scheduled as well
2243                  *                        OR
2244                  * 2. We could be handling a mbx task but nothing more is
2245                  *    scheduled.
2246                  * In both cases, we should schedule mbx task as there are more
2247                  * mbx messages reported by this interrupt.
2248                  */
2249                 hclge_mbx_task_schedule(hdev);
2250                 break;
2251         default:
2252                 dev_warn(&hdev->pdev->dev,
2253                          "received unknown or unhandled event of vector0\n");
2254                 break;
2255         }
2256
2257         /* clear the source of interrupt if it is not caused by reset */
2258         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2259                 hclge_clear_event_cause(hdev, event_cause, clearval);
2260                 hclge_enable_vector(&hdev->misc_vector, true);
2261         }
2262
2263         return IRQ_HANDLED;
2264 }
2265
2266 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2267 {
2268         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2269                 dev_warn(&hdev->pdev->dev,
2270                          "vector(vector_id %d) has been freed.\n", vector_id);
2271                 return;
2272         }
2273
2274         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2275         hdev->num_msi_left += 1;
2276         hdev->num_msi_used -= 1;
2277 }
2278
2279 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2280 {
2281         struct hclge_misc_vector *vector = &hdev->misc_vector;
2282
2283         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2284
2285         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2286         hdev->vector_status[0] = 0;
2287
2288         hdev->num_msi_left -= 1;
2289         hdev->num_msi_used += 1;
2290 }
2291
2292 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2293 {
2294         int ret;
2295
2296         hclge_get_misc_vector(hdev);
2297
2298         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
2299         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2300                           0, "hclge_misc", hdev);
2301         if (ret) {
2302                 hclge_free_vector(hdev, 0);
2303                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2304                         hdev->misc_vector.vector_irq);
2305         }
2306
2307         return ret;
2308 }
2309
2310 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2311 {
2312         free_irq(hdev->misc_vector.vector_irq, hdev);
2313         hclge_free_vector(hdev, 0);
2314 }
2315
2316 static int hclge_notify_client(struct hclge_dev *hdev,
2317                                enum hnae3_reset_notify_type type)
2318 {
2319         struct hnae3_client *client = hdev->nic_client;
2320         u16 i;
2321
2322         if (!client->ops->reset_notify)
2323                 return -EOPNOTSUPP;
2324
2325         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2326                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2327                 int ret;
2328
2329                 ret = client->ops->reset_notify(handle, type);
2330                 if (ret) {
2331                         dev_err(&hdev->pdev->dev,
2332                                 "notify nic client failed %d(%d)\n", type, ret);
2333                         return ret;
2334                 }
2335         }
2336
2337         return 0;
2338 }
2339
2340 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2341                                     enum hnae3_reset_notify_type type)
2342 {
2343         struct hnae3_client *client = hdev->roce_client;
2344         int ret = 0;
2345         u16 i;
2346
2347         if (!client)
2348                 return 0;
2349
2350         if (!client->ops->reset_notify)
2351                 return -EOPNOTSUPP;
2352
2353         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2354                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2355
2356                 ret = client->ops->reset_notify(handle, type);
2357                 if (ret) {
2358                         dev_err(&hdev->pdev->dev,
2359                                 "notify roce client failed %d(%d)",
2360                                 type, ret);
2361                         return ret;
2362                 }
2363         }
2364
2365         return ret;
2366 }
2367
2368 static int hclge_reset_wait(struct hclge_dev *hdev)
2369 {
2370 #define HCLGE_RESET_WAIT_MS     100
2371 #define HCLGE_RESET_WAIT_CNT    200
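/* worst case polling time: HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WAIT_MS = 20 seconds */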
2372         u32 val, reg, reg_bit;
2373         u32 cnt = 0;
2374
2375         switch (hdev->reset_type) {
2376         case HNAE3_IMP_RESET:
2377                 reg = HCLGE_GLOBAL_RESET_REG;
2378                 reg_bit = HCLGE_IMP_RESET_BIT;
2379                 break;
2380         case HNAE3_GLOBAL_RESET:
2381                 reg = HCLGE_GLOBAL_RESET_REG;
2382                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2383                 break;
2384         case HNAE3_CORE_RESET:
2385                 reg = HCLGE_GLOBAL_RESET_REG;
2386                 reg_bit = HCLGE_CORE_RESET_BIT;
2387                 break;
2388         case HNAE3_FUNC_RESET:
2389                 reg = HCLGE_FUN_RST_ING;
2390                 reg_bit = HCLGE_FUN_RST_ING_B;
2391                 break;
2392         case HNAE3_FLR_RESET:
2393                 break;
2394         default:
2395                 dev_err(&hdev->pdev->dev,
2396                         "Wait for unsupported reset type: %d\n",
2397                         hdev->reset_type);
2398                 return -EINVAL;
2399         }
2400
2401         if (hdev->reset_type == HNAE3_FLR_RESET) {
2402                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2403                        cnt++ < HCLGE_RESET_WAIT_CNT)
2404                         msleep(HCLGE_RESET_WAIT_MS);
2405
2406                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2407                         dev_err(&hdev->pdev->dev,
2408                                 "flr wait timeout: %d\n", cnt);
2409                         return -EBUSY;
2410                 }
2411
2412                 return 0;
2413         }
2414
2415         val = hclge_read_dev(&hdev->hw, reg);
2416         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2417                 msleep(HCLGE_RESET_WAIT_MS);
2418                 val = hclge_read_dev(&hdev->hw, reg);
2419                 cnt++;
2420         }
2421
2422         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2423                 dev_warn(&hdev->pdev->dev,
2424                          "Wait for reset timeout: %d\n", hdev->reset_type);
2425                 return -EBUSY;
2426         }
2427
2428         return 0;
2429 }
2430
2431 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2432 {
2433         struct hclge_vf_rst_cmd *req;
2434         struct hclge_desc desc;
2435
2436         req = (struct hclge_vf_rst_cmd *)desc.data;
2437         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2438         req->dest_vfid = func_id;
2439
2440         if (reset)
2441                 req->vf_rst = 0x1;
2442
2443         return hclge_cmd_send(&hdev->hw, &desc, 1);
2444 }
2445
2446 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2447 {
2448         int i;
2449
2450         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2451                 struct hclge_vport *vport = &hdev->vport[i];
2452                 int ret;
2453
2454                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2455                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2456                 if (ret) {
2457                         dev_err(&hdev->pdev->dev,
2458                                 "set vf(%d) rst failed %d!\n",
2459                                 vport->vport_id, ret);
2460                         return ret;
2461                 }
2462
2463                 if (!reset)
2464                         continue;
2465
2466                 /* Inform VF to process the reset.
2467                  * hclge_inform_reset_assert_to_vf may fail if VF
2468                  * driver is not loaded.
2469                  */
2470                 ret = hclge_inform_reset_assert_to_vf(vport);
2471                 if (ret)
2472                         dev_warn(&hdev->pdev->dev,
2473                                  "inform reset to vf(%d) failed %d!\n",
2474                                  vport->vport_id, ret);
2475         }
2476
2477         return 0;
2478 }
2479
2480 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2481 {
2482         struct hclge_desc desc;
2483         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2484         int ret;
2485
2486         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2487         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2488         req->fun_reset_vfid = func_id;
2489
2490         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2491         if (ret)
2492                 dev_err(&hdev->pdev->dev,
2493                         "send function reset cmd fail, status = %d\n", ret);
2494
2495         return ret;
2496 }
2497
2498 static void hclge_do_reset(struct hclge_dev *hdev)
2499 {
2500         struct pci_dev *pdev = hdev->pdev;
2501         u32 val;
2502
2503         switch (hdev->reset_type) {
2504         case HNAE3_GLOBAL_RESET:
2505                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2506                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2507                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2508                 dev_info(&pdev->dev, "Global Reset requested\n");
2509                 break;
2510         case HNAE3_CORE_RESET:
2511                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2512                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2513                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2514                 dev_info(&pdev->dev, "Core Reset requested\n");
2515                 break;
2516         case HNAE3_FUNC_RESET:
2517                 dev_info(&pdev->dev, "PF Reset requested\n");
2518                 /* schedule again to check later */
2519                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2520                 hclge_reset_task_schedule(hdev);
2521                 break;
2522         case HNAE3_FLR_RESET:
2523                 dev_info(&pdev->dev, "FLR requested\n");
2524                 /* schedule again to check later */
2525                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2526                 hclge_reset_task_schedule(hdev);
2527                 break;
2528         default:
2529                 dev_warn(&pdev->dev,
2530                          "Unsupported reset type: %d\n", hdev->reset_type);
2531                 break;
2532         }
2533 }
2534
2535 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2536                                                    unsigned long *addr)
2537 {
2538         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2539
2540         /* return the highest priority reset level amongst all */
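        /* e.g. if both global and function resets are pending, global
         * reset is returned and the function reset bit is cleared as
         * well, since the higher level reset covers it
         */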
2541         if (test_bit(HNAE3_IMP_RESET, addr)) {
2542                 rst_level = HNAE3_IMP_RESET;
2543                 clear_bit(HNAE3_IMP_RESET, addr);
2544                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2545                 clear_bit(HNAE3_CORE_RESET, addr);
2546                 clear_bit(HNAE3_FUNC_RESET, addr);
2547         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2548                 rst_level = HNAE3_GLOBAL_RESET;
2549                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2550                 clear_bit(HNAE3_CORE_RESET, addr);
2551                 clear_bit(HNAE3_FUNC_RESET, addr);
2552         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2553                 rst_level = HNAE3_CORE_RESET;
2554                 clear_bit(HNAE3_CORE_RESET, addr);
2555                 clear_bit(HNAE3_FUNC_RESET, addr);
2556         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2557                 rst_level = HNAE3_FUNC_RESET;
2558                 clear_bit(HNAE3_FUNC_RESET, addr);
2559         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2560                 rst_level = HNAE3_FLR_RESET;
2561                 clear_bit(HNAE3_FLR_RESET, addr);
2562         }
2563
2564         return rst_level;
2565 }
2566
2567 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2568 {
2569         u32 clearval = 0;
2570
2571         switch (hdev->reset_type) {
2572         case HNAE3_IMP_RESET:
2573                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2574                 break;
2575         case HNAE3_GLOBAL_RESET:
2576                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2577                 break;
2578         case HNAE3_CORE_RESET:
2579                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2580                 break;
2581         default:
2582                 break;
2583         }
2584
2585         if (!clearval)
2586                 return;
2587
2588         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2589         hclge_enable_vector(&hdev->misc_vector, true);
2590 }
2591
2592 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2593 {
2594         int ret = 0;
2595
2596         switch (hdev->reset_type) {
2597         case HNAE3_FUNC_RESET:
2598                 /* fall through */
2599         case HNAE3_FLR_RESET:
2600                 ret = hclge_set_all_vf_rst(hdev, true);
2601                 break;
2602         default:
2603                 break;
2604         }
2605
2606         return ret;
2607 }
2608
2609 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2610 {
2611         u32 reg_val;
2612         int ret = 0;
2613
2614         switch (hdev->reset_type) {
2615         case HNAE3_FUNC_RESET:
2616                 /* There is no mechanism for PF to know if VF has stopped IO
2617                  * for now, just wait 100 ms for VF to stop IO
2618                  */
2619                 msleep(100);
2620                 ret = hclge_func_reset_cmd(hdev, 0);
2621                 if (ret) {
2622                         dev_err(&hdev->pdev->dev,
2623                                 "asserting function reset fail %d!\n", ret);
2624                         return ret;
2625                 }
2626
2627                 /* After performing PF reset, it is not necessary to do the
2628                  * mailbox handling or send any command to firmware, because
2629                  * any mailbox handling or command to firmware is only valid
2630                  * after hclge_cmd_init is called.
2631                  */
2632                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2633                 break;
2634         case HNAE3_FLR_RESET:
2635                 /* There is no mechanism for PF to know if VF has stopped IO
2636                  * for now, just wait 100 ms for VF to stop IO
2637                  */
2638                 msleep(100);
2639                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2640                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2641                 break;
2642         case HNAE3_IMP_RESET:
2643                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2644                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2645                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2646                 break;
2647         default:
2648                 break;
2649         }
2650
2651         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2652
2653         return ret;
2654 }
2655
2656 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2657 {
2658 #define MAX_RESET_FAIL_CNT 5
2659 #define RESET_UPGRADE_DELAY_SEC 10
2660
2661         if (hdev->reset_pending) {
2662                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2663                          hdev->reset_pending);
2664                 return true;
2665         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2666                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2667                     BIT(HCLGE_IMP_RESET_BIT))) {
2668                 dev_info(&hdev->pdev->dev,
2669                          "reset failed because IMP Reset is pending\n");
2670                 hclge_clear_reset_cause(hdev);
2671                 return false;
2672         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2673                 hdev->reset_fail_cnt++;
2674                 if (is_timeout) {
2675                         set_bit(hdev->reset_type, &hdev->reset_pending);
2676                         dev_info(&hdev->pdev->dev,
2677                                  "re-schedule to wait for hw reset done\n");
2678                         return true;
2679                 }
2680
2681                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2682                 hclge_clear_reset_cause(hdev);
2683                 mod_timer(&hdev->reset_timer,
2684                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2685
2686                 return false;
2687         }
2688
2689         hclge_clear_reset_cause(hdev);
2690         dev_err(&hdev->pdev->dev, "Reset fail!\n");
2691         return false;
2692 }
2693
2694 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2695 {
2696         int ret = 0;
2697
2698         switch (hdev->reset_type) {
2699         case HNAE3_FUNC_RESET:
2700                 /* fall through */
2701         case HNAE3_FLR_RESET:
2702                 ret = hclge_set_all_vf_rst(hdev, false);
2703                 break;
2704         default:
2705                 break;
2706         }
2707
2708         return ret;
2709 }
2710
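/* hclge_reset: the main reset flow - quiesce the roce and nic clients,
 * assert the reset and wait for hardware to complete it, then re-init
 * the ae device and bring the clients back up.
 */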
2711 static void hclge_reset(struct hclge_dev *hdev)
2712 {
2713         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2714         bool is_timeout = false;
2715         int ret;
2716
2717         /* Initialize ae_dev reset status as well, in case enet layer wants to
2718          * know if device is undergoing reset
2719          */
2720         ae_dev->reset_type = hdev->reset_type;
2721         hdev->reset_count++;
2722         hdev->last_reset_time = jiffies;
2723         /* perform reset of the stack & ae device for a client */
2724         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2725         if (ret)
2726                 goto err_reset;
2727
2728         ret = hclge_reset_prepare_down(hdev);
2729         if (ret)
2730                 goto err_reset;
2731
2732         rtnl_lock();
2733         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2734         if (ret)
2735                 goto err_reset_lock;
2736
2737         rtnl_unlock();
2738
2739         ret = hclge_reset_prepare_wait(hdev);
2740         if (ret)
2741                 goto err_reset;
2742
2743         if (hclge_reset_wait(hdev)) {
2744                 is_timeout = true;
2745                 goto err_reset;
2746         }
2747
2748         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2749         if (ret)
2750                 goto err_reset;
2751
2752         rtnl_lock();
2753         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2754         if (ret)
2755                 goto err_reset_lock;
2756
2757         ret = hclge_reset_ae_dev(hdev->ae_dev);
2758         if (ret)
2759                 goto err_reset_lock;
2760
2761         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2762         if (ret)
2763                 goto err_reset_lock;
2764
2765         hclge_clear_reset_cause(hdev);
2766
2767         ret = hclge_reset_prepare_up(hdev);
2768         if (ret)
2769                 goto err_reset_lock;
2770
2771         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2772         if (ret)
2773                 goto err_reset_lock;
2774
2775         rtnl_unlock();
2776
2777         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2778         if (ret)
2779                 goto err_reset;
2780
2781         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2782         if (ret)
2783                 goto err_reset;
2784
2785         return;
2786
2787 err_reset_lock:
2788         rtnl_unlock();
2789 err_reset:
2790         if (hclge_reset_err_handle(hdev, is_timeout))
2791                 hclge_reset_task_schedule(hdev);
2792 }
2793
2794 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2795 {
2796         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2797         struct hclge_dev *hdev = ae_dev->priv;
2798
2799         /* We might end up getting called broadly because of 2 below cases:
2800          * 1. Recoverable error was conveyed through APEI and only way to bring
2801          *    normalcy is to reset.
2802          * 2. A new reset request from the stack due to timeout
2803          *
2804          * For the first case, the error event might not have an ae handle.
2805          * Check if this is a new reset request and we are not here just because
2806          * the last reset attempt did not succeed and the watchdog hit us again.
2807          * We will know this if the last reset request did not occur very recently
2808          * (watchdog timer = 5*HZ, so check after a sufficiently large time, 4*5*HZ).
2809          * In case of new request we reset the "reset level" to PF reset.
2810          * And if it is a repeat reset request of the most recent one then we
2811          * want to make sure we throttle the reset request. Therefore, we will
2812          * not allow it again before 3*HZ times.
2813          */
2814         if (!handle)
2815                 handle = &hdev->vport[0].nic;
2816
2817         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2818                 return;
2819         else if (hdev->default_reset_request)
2820                 hdev->reset_level =
2821                         hclge_get_reset_level(hdev,
2822                                               &hdev->default_reset_request);
2823         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2824                 hdev->reset_level = HNAE3_FUNC_RESET;
2825
2826         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
2827                  hdev->reset_level);
2828
2829         /* request reset & schedule reset task */
2830         set_bit(hdev->reset_level, &hdev->reset_request);
2831         hclge_reset_task_schedule(hdev);
2832
2833         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2834                 hdev->reset_level++;
2835 }
2836
2837 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2838                                         enum hnae3_reset_type rst_type)
2839 {
2840         struct hclge_dev *hdev = ae_dev->priv;
2841
2842         set_bit(rst_type, &hdev->default_reset_request);
2843 }
2844
2845 static void hclge_reset_timer(struct timer_list *t)
2846 {
2847         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2848
2849         dev_info(&hdev->pdev->dev,
2850                  "triggering global reset in reset timer\n");
2851         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2852         hclge_reset_event(hdev->pdev, NULL);
2853 }
2854
2855 static void hclge_reset_subtask(struct hclge_dev *hdev)
2856 {
2857         /* check if there is any ongoing reset in the hardware. This status can
2858          * be checked from reset_pending. If there is then, we need to wait for
2859          * hardware to complete reset.
2860          *    a. If we are able to figure out in reasonable time that hardware
2861          *       has fully reset, then we can proceed with the driver and
2862          *       client reset.
2863          *    b. else, we can come back later to check this status, so
2864          *       re-schedule now.
2865          */
2866         hdev->last_reset_time = jiffies;
2867         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2868         if (hdev->reset_type != HNAE3_NONE_RESET)
2869                 hclge_reset(hdev);
2870
2871         /* check if we got any *new* reset requests to be honored */
2872         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2873         if (hdev->reset_type != HNAE3_NONE_RESET)
2874                 hclge_do_reset(hdev);
2875
2876         hdev->reset_type = HNAE3_NONE_RESET;
2877 }
2878
2879 static void hclge_reset_service_task(struct work_struct *work)
2880 {
2881         struct hclge_dev *hdev =
2882                 container_of(work, struct hclge_dev, rst_service_task);
2883
2884         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2885                 return;
2886
2887         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2888
2889         hclge_reset_subtask(hdev);
2890
2891         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2892 }
2893
2894 static void hclge_mailbox_service_task(struct work_struct *work)
2895 {
2896         struct hclge_dev *hdev =
2897                 container_of(work, struct hclge_dev, mbx_service_task);
2898
2899         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2900                 return;
2901
2902         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2903
2904         hclge_mbx_handler(hdev);
2905
2906         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2907 }
2908
2909 static void hclge_update_vport_alive(struct hclge_dev *hdev)
2910 {
2911         int i;
2912
2913         /* start from vport 1 since the PF is always alive */
2914         for (i = 1; i < hdev->num_alloc_vport; i++) {
2915                 struct hclge_vport *vport = &hdev->vport[i];
2916
2917                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
2918                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
2919
2920                 /* If the VF is not alive, set mps to the default value */
2921                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2922                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
2923         }
2924 }
2925
2926 static void hclge_service_task(struct work_struct *work)
2927 {
2928         struct hclge_dev *hdev =
2929                 container_of(work, struct hclge_dev, service_task);
2930
2931         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2932                 hclge_update_stats_for_all(hdev);
2933                 hdev->hw_stats.stats_timer = 0;
2934         }
2935
2936         hclge_update_speed_duplex(hdev);
2937         hclge_update_link_status(hdev);
2938         hclge_update_vport_alive(hdev);
2939         hclge_service_complete(hdev);
2940 }
2941
2942 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2943 {
2944         /* VF handle has no client */
2945         if (!handle->client)
2946                 return container_of(handle, struct hclge_vport, nic);
2947         else if (handle->client->type == HNAE3_CLIENT_ROCE)
2948                 return container_of(handle, struct hclge_vport, roce);
2949         else
2950                 return container_of(handle, struct hclge_vport, nic);
2951 }
2952
2953 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2954                             struct hnae3_vector_info *vector_info)
2955 {
2956         struct hclge_vport *vport = hclge_get_vport(handle);
2957         struct hnae3_vector_info *vector = vector_info;
2958         struct hclge_dev *hdev = vport->back;
2959         int alloc = 0;
2960         int i, j;
2961
2962         vector_num = min(hdev->num_msi_left, vector_num);
2963
2964         for (j = 0; j < vector_num; j++) {
2965                 for (i = 1; i < hdev->num_msi; i++) {
2966                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2967                                 vector->vector = pci_irq_vector(hdev->pdev, i);
2968                                 vector->io_addr = hdev->hw.io_base +
2969                                         HCLGE_VECTOR_REG_BASE +
2970                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2971                                         vport->vport_id *
2972                                         HCLGE_VECTOR_VF_OFFSET;
2973                                 hdev->vector_status[i] = vport->vport_id;
2974                                 hdev->vector_irq[i] = vector->vector;
2975
2976                                 vector++;
2977                                 alloc++;
2978
2979                                 break;
2980                         }
2981                 }
2982         }
2983         hdev->num_msi_left -= alloc;
2984         hdev->num_msi_used += alloc;
2985
2986         return alloc;
2987 }
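
/* Illustrative sketch, not part of the driver: vector 0 is reserved for the
 * misc interrupt, which is why the search above starts at i = 1. Each
 * remaining vector's interrupt control address is a fixed base plus a
 * per-vector stride plus a per-VF stride, using the same constants as the
 * loop above. The helper name is hypothetical and the struct hclge_hw layout
 * is assumed from the usage above.
 */
static inline void __iomem *hclge_vector_io_addr(struct hclge_hw *hw, int i,
                                                 u16 vport_id)
{
        return hw->io_base + HCLGE_VECTOR_REG_BASE +
               (i - 1) * HCLGE_VECTOR_REG_OFFSET +
               vport_id * HCLGE_VECTOR_VF_OFFSET;
}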
2988
2989 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2990 {
2991         int i;
2992
2993         for (i = 0; i < hdev->num_msi; i++)
2994                 if (vector == hdev->vector_irq[i])
2995                         return i;
2996
2997         return -EINVAL;
2998 }
2999
3000 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3001 {
3002         struct hclge_vport *vport = hclge_get_vport(handle);
3003         struct hclge_dev *hdev = vport->back;
3004         int vector_id;
3005
3006         vector_id = hclge_get_vector_index(hdev, vector);
3007         if (vector_id < 0) {
3008                 dev_err(&hdev->pdev->dev,
3009                         "Get vector index fail. vector_id =%d\n", vector_id);
3010                 return vector_id;
3011         }
3012
3013         hclge_free_vector(hdev, vector_id);
3014
3015         return 0;
3016 }
3017
3018 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3019 {
3020         return HCLGE_RSS_KEY_SIZE;
3021 }
3022
3023 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3024 {
3025         return HCLGE_RSS_IND_TBL_SIZE;
3026 }
3027
3028 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3029                                   const u8 hfunc, const u8 *key)
3030 {
3031         struct hclge_rss_config_cmd *req;
3032         struct hclge_desc desc;
3033         int key_offset;
3034         int key_size;
3035         int ret;
3036
3037         req = (struct hclge_rss_config_cmd *)desc.data;
3038
3039         for (key_offset = 0; key_offset < 3; key_offset++) {
3040                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3041                                            false);
3042
3043                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3044                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3045
3046                 if (key_offset == 2)
3047                         key_size = HCLGE_RSS_KEY_SIZE -
3048                                    HCLGE_RSS_HASH_KEY_NUM * 2;
3049                 else
3050                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3051
3052                 memcpy(req->hash_key,
3053                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3054
3055                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3056                 if (ret) {
3057                         dev_err(&hdev->pdev->dev,
3058                                 "Configure RSS config fail, status = %d\n",
3059                                 ret);
3060                         return ret;
3061                 }
3062         }
3063         return 0;
3064 }
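
/* Illustrative sketch, not part of the driver: one descriptor cannot carry
 * the whole RSS key, so it is written in three chunks. Assuming
 * HCLGE_RSS_KEY_SIZE == 40 and HCLGE_RSS_HASH_KEY_NUM == 16, as implied by
 * the loop above, the chunks are 16, 16 and 8 bytes. Hypothetical helper
 * name:
 */
static inline int hclge_rss_key_chunk_size(int key_offset)
{
        if (key_offset == 2)
                return HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
        return HCLGE_RSS_HASH_KEY_NUM;
}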
3065
3066 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3067 {
3068         struct hclge_rss_indirection_table_cmd *req;
3069         struct hclge_desc desc;
3070         int i, j;
3071         int ret;
3072
3073         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3074
3075         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3076                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3077                                            false);
3078
3079                 req->start_table_index =
3080                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3081                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3082
3083                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3084                         req->rss_result[j] =
3085                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3086
3087                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3088                 if (ret) {
3089                         dev_err(&hdev->pdev->dev,
3090                                 "Configure rss indir table fail,status = %d\n",
3091                                 ret);
3092                         return ret;
3093                 }
3094         }
3095         return 0;
3096 }
3097
3098 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3099                                  u16 *tc_size, u16 *tc_offset)
3100 {
3101         struct hclge_rss_tc_mode_cmd *req;
3102         struct hclge_desc desc;
3103         int ret;
3104         int i;
3105
3106         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3107         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3108
3109         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3110                 u16 mode = 0;
3111
3112                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3113                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3114                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3115                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3116                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3117
3118                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3119         }
3120
3121         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3122         if (ret)
3123                 dev_err(&hdev->pdev->dev,
3124                         "Configure rss tc mode fail, status = %d\n", ret);
3125
3126         return ret;
3127 }
3128
3129 static void hclge_get_rss_type(struct hclge_vport *vport)
3130 {
3131         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3132             vport->rss_tuple_sets.ipv4_udp_en ||
3133             vport->rss_tuple_sets.ipv4_sctp_en ||
3134             vport->rss_tuple_sets.ipv6_tcp_en ||
3135             vport->rss_tuple_sets.ipv6_udp_en ||
3136             vport->rss_tuple_sets.ipv6_sctp_en)
3137                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3138         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3139                  vport->rss_tuple_sets.ipv6_fragment_en)
3140                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3141         else
3142                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3143 }
3144
3145 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3146 {
3147         struct hclge_rss_input_tuple_cmd *req;
3148         struct hclge_desc desc;
3149         int ret;
3150
3151         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3152
3153         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3154
3155         /* Get the tuple cfg from the PF */
3156         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3157         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3158         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3159         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3160         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3161         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3162         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3163         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3164         hclge_get_rss_type(&hdev->vport[0]);
3165         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3166         if (ret)
3167                 dev_err(&hdev->pdev->dev,
3168                         "Configure rss input fail, status = %d\n", ret);
3169         return ret;
3170 }
3171
3172 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3173                          u8 *key, u8 *hfunc)
3174 {
3175         struct hclge_vport *vport = hclge_get_vport(handle);
3176         int i;
3177
3178         /* Get hash algorithm */
3179         if (hfunc) {
3180                 switch (vport->rss_algo) {
3181                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3182                         *hfunc = ETH_RSS_HASH_TOP;
3183                         break;
3184                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3185                         *hfunc = ETH_RSS_HASH_XOR;
3186                         break;
3187                 default:
3188                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3189                         break;
3190                 }
3191         }
3192
3193         /* Get the RSS Key required by the user */
3194         if (key)
3195                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3196
3197         /* Get indirect table */
3198         if (indir)
3199                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3200                         indir[i] = vport->rss_indirection_tbl[i];
3201
3202         return 0;
3203 }
3204
3205 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3206                          const u8 *key, const u8 hfunc)
3207 {
3208         struct hclge_vport *vport = hclge_get_vport(handle);
3209         struct hclge_dev *hdev = vport->back;
3210         u8 hash_algo;
3211         int ret, i;
3212
3213         /* Set the RSS Hash Key if specified by the user */
3214         if (key) {
3215                 switch (hfunc) {
3216                 case ETH_RSS_HASH_TOP:
3217                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3218                         break;
3219                 case ETH_RSS_HASH_XOR:
3220                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3221                         break;
3222                 case ETH_RSS_HASH_NO_CHANGE:
3223                         hash_algo = vport->rss_algo;
3224                         break;
3225                 default:
3226                         return -EINVAL;
3227                 }
3228
3229                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3230                 if (ret)
3231                         return ret;
3232
3233                 /* Update the shadow RSS key with the user specified key */
3234                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3235                 vport->rss_algo = hash_algo;
3236         }
3237
3238         /* Update the shadow RSS table with user specified qids */
3239         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3240                 vport->rss_indirection_tbl[i] = indir[i];
3241
3242         /* Update the hardware */
3243         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3244 }
3245
3246 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3247 {
3248         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3249
3250         if (nfc->data & RXH_L4_B_2_3)
3251                 hash_sets |= HCLGE_D_PORT_BIT;
3252         else
3253                 hash_sets &= ~HCLGE_D_PORT_BIT;
3254
3255         if (nfc->data & RXH_IP_SRC)
3256                 hash_sets |= HCLGE_S_IP_BIT;
3257         else
3258                 hash_sets &= ~HCLGE_S_IP_BIT;
3259
3260         if (nfc->data & RXH_IP_DST)
3261                 hash_sets |= HCLGE_D_IP_BIT;
3262         else
3263                 hash_sets &= ~HCLGE_D_IP_BIT;
3264
3265         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3266                 hash_sets |= HCLGE_V_TAG_BIT;
3267
3268         return hash_sets;
3269 }
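
/* Illustrative sketch, not part of the driver: for "ethtool -N <dev>
 * rx-flow-hash tcp4 sdfn", user space sets nfc->data to RXH_IP_SRC |
 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the helper above folds
 * into HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally hash the verification tag
 * (HCLGE_V_TAG_BIT). A minimal check with a hypothetical name:
 */
static inline bool hclge_hash_covers_l4_ports(u8 hash_sets)
{
        return (hash_sets & (HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT)) ==
               (HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT);
}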
3270
3271 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3272                                struct ethtool_rxnfc *nfc)
3273 {
3274         struct hclge_vport *vport = hclge_get_vport(handle);
3275         struct hclge_dev *hdev = vport->back;
3276         struct hclge_rss_input_tuple_cmd *req;
3277         struct hclge_desc desc;
3278         u8 tuple_sets;
3279         int ret;
3280
3281         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3282                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3283                 return -EINVAL;
3284
3285         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3287
3288         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3289         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3290         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3291         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3292         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3293         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3294         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3295         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3296
3297         tuple_sets = hclge_get_rss_hash_bits(nfc);
3298         switch (nfc->flow_type) {
3299         case TCP_V4_FLOW:
3300                 req->ipv4_tcp_en = tuple_sets;
3301                 break;
3302         case TCP_V6_FLOW:
3303                 req->ipv6_tcp_en = tuple_sets;
3304                 break;
3305         case UDP_V4_FLOW:
3306                 req->ipv4_udp_en = tuple_sets;
3307                 break;
3308         case UDP_V6_FLOW:
3309                 req->ipv6_udp_en = tuple_sets;
3310                 break;
3311         case SCTP_V4_FLOW:
3312                 req->ipv4_sctp_en = tuple_sets;
3313                 break;
3314         case SCTP_V6_FLOW:
3315                 if ((nfc->data & RXH_L4_B_0_1) ||
3316                     (nfc->data & RXH_L4_B_2_3))
3317                         return -EINVAL;
3318
3319                 req->ipv6_sctp_en = tuple_sets;
3320                 break;
3321         case IPV4_FLOW:
3322                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3323                 break;
3324         case IPV6_FLOW:
3325                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3326                 break;
3327         default:
3328                 return -EINVAL;
3329         }
3330
3331         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3332         if (ret) {
3333                 dev_err(&hdev->pdev->dev,
3334                         "Set rss tuple fail, status = %d\n", ret);
3335                 return ret;
3336         }
3337
3338         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3339         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3340         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3341         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3342         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3343         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3344         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3345         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3346         hclge_get_rss_type(vport);
3347         return 0;
3348 }
3349
3350 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3351                                struct ethtool_rxnfc *nfc)
3352 {
3353         struct hclge_vport *vport = hclge_get_vport(handle);
3354         u8 tuple_sets;
3355
3356         nfc->data = 0;
3357
3358         switch (nfc->flow_type) {
3359         case TCP_V4_FLOW:
3360                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3361                 break;
3362         case UDP_V4_FLOW:
3363                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3364                 break;
3365         case TCP_V6_FLOW:
3366                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3367                 break;
3368         case UDP_V6_FLOW:
3369                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3370                 break;
3371         case SCTP_V4_FLOW:
3372                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3373                 break;
3374         case SCTP_V6_FLOW:
3375                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3376                 break;
3377         case IPV4_FLOW:
3378         case IPV6_FLOW:
3379                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3380                 break;
3381         default:
3382                 return -EINVAL;
3383         }
3384
3385         if (!tuple_sets)
3386                 return 0;
3387
3388         if (tuple_sets & HCLGE_D_PORT_BIT)
3389                 nfc->data |= RXH_L4_B_2_3;
3390         if (tuple_sets & HCLGE_S_PORT_BIT)
3391                 nfc->data |= RXH_L4_B_0_1;
3392         if (tuple_sets & HCLGE_D_IP_BIT)
3393                 nfc->data |= RXH_IP_DST;
3394         if (tuple_sets & HCLGE_S_IP_BIT)
3395                 nfc->data |= RXH_IP_SRC;
3396
3397         return 0;
3398 }
3399
3400 static int hclge_get_tc_size(struct hnae3_handle *handle)
3401 {
3402         struct hclge_vport *vport = hclge_get_vport(handle);
3403         struct hclge_dev *hdev = vport->back;
3404
3405         return hdev->rss_size_max;
3406 }
3407
3408 int hclge_rss_init_hw(struct hclge_dev *hdev)
3409 {
3410         struct hclge_vport *vport = hdev->vport;
3411         u8 *rss_indir = vport[0].rss_indirection_tbl;
3412         u16 rss_size = vport[0].alloc_rss_size;
3413         u8 *key = vport[0].rss_hash_key;
3414         u8 hfunc = vport[0].rss_algo;
3415         u16 tc_offset[HCLGE_MAX_TC_NUM];
3416         u16 tc_valid[HCLGE_MAX_TC_NUM];
3417         u16 tc_size[HCLGE_MAX_TC_NUM];
3418         u16 roundup_size;
3419         int i, ret;
3420
3421         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3422         if (ret)
3423                 return ret;
3424
3425         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3426         if (ret)
3427                 return ret;
3428
3429         ret = hclge_set_rss_input_tuple(hdev);
3430         if (ret)
3431                 return ret;
3432
3433         /* Each TC has the same queue size, and the tc_size set to hardware
3434          * is the log2 of the roundup power of two of rss_size; the actual
3435          * queue size is limited by the indirection table.
3436          */
3437         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3438                 dev_err(&hdev->pdev->dev,
3439                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3440                         rss_size);
3441                 return -EINVAL;
3442         }
3443
3444         roundup_size = roundup_pow_of_two(rss_size);
3445         roundup_size = ilog2(roundup_size);
3446
3447         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3448                 tc_valid[i] = 0;
3449
3450                 if (!(hdev->hw_tc_map & BIT(i)))
3451                         continue;
3452
3453                 tc_valid[i] = 1;
3454                 tc_size[i] = roundup_size;
3455                 tc_offset[i] = rss_size * i;
3456         }
3457
3458         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3459 }
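
/* Illustrative sketch, not part of the driver: the tc_size written to
 * hardware is log2(roundup_pow_of_two(rss_size)). For example, rss_size = 24
 * rounds up to 32 and ilog2(32) = 5, so each TC spans a 32-entry window even
 * though only 24 queues are used; the indirection table limits the actual
 * spread. Hypothetical helper name:
 */
static inline u16 hclge_rss_tc_size_shift(u16 rss_size)
{
        return ilog2(roundup_pow_of_two(rss_size));
}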
3460
3461 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3462 {
3463         struct hclge_vport *vport = hdev->vport;
3464         int i, j;
3465
3466         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3467                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3468                         vport[j].rss_indirection_tbl[i] =
3469                                 i % vport[j].alloc_rss_size;
3470         }
3471 }
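
/* Illustrative sketch, not part of the driver: with alloc_rss_size == 4 the
 * default table above becomes 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading RX hash
 * values round-robin across the vport's RSS queues. Hypothetical helper
 * name:
 */
static inline u8 hclge_default_indir_entry(int i, u16 alloc_rss_size)
{
        return i % alloc_rss_size;
}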
3472
3473 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3474 {
3475         struct hclge_vport *vport = hdev->vport;
3476         int i;
3477
3478         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3479                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3480                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3481                 vport[i].rss_tuple_sets.ipv4_udp_en =
3482                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3483                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3484                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3485                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3486                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3487                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3488                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3489                 vport[i].rss_tuple_sets.ipv6_udp_en =
3490                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3491                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3492                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3493                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3494                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3495
3496                 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3497
3498                 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3499         }
3500
3501         hclge_rss_indir_init_cfg(hdev);
3502 }
3503
3504 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3505                                 int vector_id, bool en,
3506                                 struct hnae3_ring_chain_node *ring_chain)
3507 {
3508         struct hclge_dev *hdev = vport->back;
3509         struct hnae3_ring_chain_node *node;
3510         struct hclge_desc desc;
3511         struct hclge_ctrl_vector_chain_cmd *req
3512                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3513         enum hclge_cmd_status status;
3514         enum hclge_opcode_type op;
3515         u16 tqp_type_and_id;
3516         int i;
3517
3518         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3519         hclge_cmd_setup_basic_desc(&desc, op, false);
3520         req->int_vector_id = vector_id;
3521
3522         i = 0;
3523         for (node = ring_chain; node; node = node->next) {
3524                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3525                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3526                                 HCLGE_INT_TYPE_S,
3527                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3528                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3529                                 HCLGE_TQP_ID_S, node->tqp_index);
3530                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3531                                 HCLGE_INT_GL_IDX_S,
3532                                 hnae3_get_field(node->int_gl_idx,
3533                                                 HNAE3_RING_GL_IDX_M,
3534                                                 HNAE3_RING_GL_IDX_S));
3535                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3536                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3537                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3538                         req->vfid = vport->vport_id;
3539
3540                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3541                         if (status) {
3542                                 dev_err(&hdev->pdev->dev,
3543                                         "Map TQP fail, status is %d.\n",
3544                                         status);
3545                                 return -EIO;
3546                         }
3547                         i = 0;
3548
3549                         hclge_cmd_setup_basic_desc(&desc,
3550                                                    op,
3551                                                    false);
3552                         req->int_vector_id = vector_id;
3553                 }
3554         }
3555
3556         if (i > 0) {
3557                 req->int_cause_num = i;
3558                 req->vfid = vport->vport_id;
3559                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3560                 if (status) {
3561                         dev_err(&hdev->pdev->dev,
3562                                 "Map TQP fail, status is %d.\n", status);
3563                         return -EIO;
3564                 }
3565         }
3566
3567         return 0;
3568 }
3569
3570 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3571                                     int vector,
3572                                     struct hnae3_ring_chain_node *ring_chain)
3573 {
3574         struct hclge_vport *vport = hclge_get_vport(handle);
3575         struct hclge_dev *hdev = vport->back;
3576         int vector_id;
3577
3578         vector_id = hclge_get_vector_index(hdev, vector);
3579         if (vector_id < 0) {
3580                 dev_err(&hdev->pdev->dev,
3581                         "Get vector index fail. vector_id =%d\n", vector_id);
3582                 return vector_id;
3583         }
3584
3585         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3586 }
3587
3588 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3589                                        int vector,
3590                                        struct hnae3_ring_chain_node *ring_chain)
3591 {
3592         struct hclge_vport *vport = hclge_get_vport(handle);
3593         struct hclge_dev *hdev = vport->back;
3594         int vector_id, ret;
3595
3596         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3597                 return 0;
3598
3599         vector_id = hclge_get_vector_index(hdev, vector);
3600         if (vector_id < 0) {
3601                 dev_err(&handle->pdev->dev,
3602                         "Get vector index fail. ret =%d\n", vector_id);
3603                 return vector_id;
3604         }
3605
3606         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3607         if (ret)
3608                 dev_err(&handle->pdev->dev,
3609                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3610                         vector_id,
3611                         ret);
3612
3613         return ret;
3614 }
3615
3616 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3617                                struct hclge_promisc_param *param)
3618 {
3619         struct hclge_promisc_cfg_cmd *req;
3620         struct hclge_desc desc;
3621         int ret;
3622
3623         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3624
3625         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3626         req->vf_id = param->vf_id;
3627
3628         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
3629          * on pdev revision(0x20); newer revisions support them. Setting these
3630          * two fields does not cause an error when the driver sends the
3631          * command to the firmware on revision(0x20).
3632          */
3633         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3634                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3635
3636         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3637         if (ret)
3638                 dev_err(&hdev->pdev->dev,
3639                         "Set promisc mode fail, status is %d.\n", ret);
3640
3641         return ret;
3642 }
3643
3644 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3645                               bool en_mc, bool en_bc, int vport_id)
3646 {
3647         if (!param)
3648                 return;
3649
3650         memset(param, 0, sizeof(struct hclge_promisc_param));
3651         if (en_uc)
3652                 param->enable = HCLGE_PROMISC_EN_UC;
3653         if (en_mc)
3654                 param->enable |= HCLGE_PROMISC_EN_MC;
3655         if (en_bc)
3656                 param->enable |= HCLGE_PROMISC_EN_BC;
3657         param->vf_id = vport_id;
3658 }
3659
3660 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3661                                   bool en_mc_pmc)
3662 {
3663         struct hclge_vport *vport = hclge_get_vport(handle);
3664         struct hclge_dev *hdev = vport->back;
3665         struct hclge_promisc_param param;
3666
3667         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
3668                                  vport->vport_id);
3669         return hclge_cmd_set_promisc_mode(hdev, &param);
3670 }
3671
3672 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3673 {
3674         struct hclge_get_fd_mode_cmd *req;
3675         struct hclge_desc desc;
3676         int ret;
3677
3678         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3679
3680         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3681
3682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3683         if (ret) {
3684                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3685                 return ret;
3686         }
3687
3688         *fd_mode = req->mode;
3689
3690         return ret;
3691 }
3692
3693 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3694                                    u32 *stage1_entry_num,
3695                                    u32 *stage2_entry_num,
3696                                    u16 *stage1_counter_num,
3697                                    u16 *stage2_counter_num)
3698 {
3699         struct hclge_get_fd_allocation_cmd *req;
3700         struct hclge_desc desc;
3701         int ret;
3702
3703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3704
3705         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3706
3707         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3708         if (ret) {
3709                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3710                         ret);
3711                 return ret;
3712         }
3713
3714         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3715         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3716         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3717         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3718
3719         return ret;
3720 }
3721
3722 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3723 {
3724         struct hclge_set_fd_key_config_cmd *req;
3725         struct hclge_fd_key_cfg *stage;
3726         struct hclge_desc desc;
3727         int ret;
3728
3729         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3730
3731         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3732         stage = &hdev->fd_cfg.key_cfg[stage_num];
3733         req->stage = stage_num;
3734         req->key_select = stage->key_sel;
3735         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3736         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3737         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3738         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3739         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3740         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3741
3742         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3743         if (ret)
3744                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3745
3746         return ret;
3747 }
3748
3749 static int hclge_init_fd_config(struct hclge_dev *hdev)
3750 {
3751 #define LOW_2_WORDS             0x03
3752         struct hclge_fd_key_cfg *key_cfg;
3753         int ret;
3754
3755         if (!hnae3_dev_fd_supported(hdev))
3756                 return 0;
3757
3758         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3759         if (ret)
3760                 return ret;
3761
3762         switch (hdev->fd_cfg.fd_mode) {
3763         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3764                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3765                 break;
3766         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3767                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3768                 break;
3769         default:
3770                 dev_err(&hdev->pdev->dev,
3771                         "Unsupported flow director mode %d\n",
3772                         hdev->fd_cfg.fd_mode);
3773                 return -EOPNOTSUPP;
3774         }
3775
3776         hdev->fd_cfg.fd_en = true;
3777         hdev->fd_cfg.proto_support =
3778                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3779                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3780         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3781         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3782         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3783         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3784         key_cfg->outer_sipv6_word_en = 0;
3785         key_cfg->outer_dipv6_word_en = 0;
3786
3787         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3788                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3789                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3790                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3791
3792         /* If the max 400-bit key is used, we can support tuples for ether type */
3793         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3794                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3795                 key_cfg->tuple_active |=
3796                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3797         }
3798
3799         /* roce_type is used to filter RoCE frames;
3800          * dst_vport is used to specify the rule
3801          */
3802         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3803
3804         ret = hclge_get_fd_allocation(hdev,
3805                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3806                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3807                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3808                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3809         if (ret)
3810                 return ret;
3811
3812         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3813 }
3814
3815 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3816                                 int loc, u8 *key, bool is_add)
3817 {
3818         struct hclge_fd_tcam_config_1_cmd *req1;
3819         struct hclge_fd_tcam_config_2_cmd *req2;
3820         struct hclge_fd_tcam_config_3_cmd *req3;
3821         struct hclge_desc desc[3];
3822         int ret;
3823
3824         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3825         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3826         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3827         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3828         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3829
3830         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3831         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3832         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3833
3834         req1->stage = stage;
3835         req1->xy_sel = sel_x ? 1 : 0;
3836         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3837         req1->index = cpu_to_le32(loc);
3838         req1->entry_vld = sel_x ? is_add : 0;
3839
3840         if (key) {
3841                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3842                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3843                        sizeof(req2->tcam_data));
3844                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3845                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
3846         }
3847
3848         ret = hclge_cmd_send(&hdev->hw, desc, 3);
3849         if (ret)
3850                 dev_err(&hdev->pdev->dev,
3851                         "config tcam key fail, ret=%d\n",
3852                         ret);
3853
3854         return ret;
3855 }
3856
3857 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3858                               struct hclge_fd_ad_data *action)
3859 {
3860         struct hclge_fd_ad_config_cmd *req;
3861         struct hclge_desc desc;
3862         u64 ad_data = 0;
3863         int ret;
3864
3865         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3866
3867         req = (struct hclge_fd_ad_config_cmd *)desc.data;
3868         req->index = cpu_to_le32(loc);
3869         req->stage = stage;
3870
3871         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3872                       action->write_rule_id_to_bd);
3873         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3874                         action->rule_id);
3875         ad_data <<= 32;
3876         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3877         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3878                       action->forward_to_direct_queue);
3879         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3880                         action->queue_id);
3881         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3882         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3883                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3884         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3885         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3886                         action->counter_id);
3887
3888         req->ad_data = cpu_to_le64(ad_data);
3889         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3890         if (ret)
3891                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3892
3893         return ret;
3894 }
3895
3896 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3897                                    struct hclge_fd_rule *rule)
3898 {
3899         u16 tmp_x_s, tmp_y_s;
3900         u32 tmp_x_l, tmp_y_l;
3901         int i;
3902
3903         if (rule->unused_tuple & tuple_bit)
3904                 return true;
3905
3906         switch (tuple_bit) {
3907         case 0:
3908                 return false;
3909         case BIT(INNER_DST_MAC):
3910                 for (i = 0; i < 6; i++) {
3911                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3912                                rule->tuples_mask.dst_mac[i]);
3913                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3914                                rule->tuples_mask.dst_mac[i]);
3915                 }
3916
3917                 return true;
3918         case BIT(INNER_SRC_MAC):
3919                 for (i = 0; i < 6; i++) {
3920                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3921                                rule->tuples_mask.src_mac[i]);
3922                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3923                                rule->tuples_mask.src_mac[i]);
3924                 }
3925
3926                 return true;
3927         case BIT(INNER_VLAN_TAG_FST):
3928                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3929                        rule->tuples_mask.vlan_tag1);
3930                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3931                        rule->tuples_mask.vlan_tag1);
3932                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3933                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3934
3935                 return true;
3936         case BIT(INNER_ETH_TYPE):
3937                 calc_x(tmp_x_s, rule->tuples.ether_proto,
3938                        rule->tuples_mask.ether_proto);
3939                 calc_y(tmp_y_s, rule->tuples.ether_proto,
3940                        rule->tuples_mask.ether_proto);
3941                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3942                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3943
3944                 return true;
3945         case BIT(INNER_IP_TOS):
3946                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3947                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
3948
3949                 return true;
3950         case BIT(INNER_IP_PROTO):
3951                 calc_x(*key_x, rule->tuples.ip_proto,
3952                        rule->tuples_mask.ip_proto);
3953                 calc_y(*key_y, rule->tuples.ip_proto,
3954                        rule->tuples_mask.ip_proto);
3955
3956                 return true;
3957         case BIT(INNER_SRC_IP):
3958                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
3959                        rule->tuples_mask.src_ip[3]);
3960                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
3961                        rule->tuples_mask.src_ip[3]);
3962                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3963                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3964
3965                 return true;
3966         case BIT(INNER_DST_IP):
3967                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
3968                        rule->tuples_mask.dst_ip[3]);
3969                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
3970                        rule->tuples_mask.dst_ip[3]);
3971                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
3972                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
3973
3974                 return true;
3975         case BIT(INNER_SRC_PORT):
3976                 calc_x(tmp_x_s, rule->tuples.src_port,
3977                        rule->tuples_mask.src_port);
3978                 calc_y(tmp_y_s, rule->tuples.src_port,
3979                        rule->tuples_mask.src_port);
3980                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3981                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3982
3983                 return true;
3984         case BIT(INNER_DST_PORT):
3985                 calc_x(tmp_x_s, rule->tuples.dst_port,
3986                        rule->tuples_mask.dst_port);
3987                 calc_y(tmp_y_s, rule->tuples.dst_port,
3988                        rule->tuples_mask.dst_port);
3989                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3990                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3991
3992                 return true;
3993         default:
3994                 return false;
3995         }
3996 }
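
/* Illustrative sketch, not part of the driver: calc_x()/calc_y() encode a
 * value/mask pair into the X/Y form used by TCAM lookups. Assuming the
 * common encoding where a masked-out bit matches anything, one way to
 * express it for a 16-bit field is (hypothetical helper, not the driver's
 * macros):
 */
static inline void hclge_tcam_encode_u16(u16 val, u16 mask,
                                         u16 *key_x, u16 *key_y)
{
        *key_x = ~val & mask;   /* X bit set: packet bit must be 0 */
        *key_y = val & mask;    /* Y bit set: packet bit must be 1 */
        /* both bits clear: don't care, the bit is not compared */
}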
3997
3998 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
3999                                  u8 vf_id, u8 network_port_id)
4000 {
4001         u32 port_number = 0;
4002
4003         if (port_type == HOST_PORT) {
4004                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4005                                 pf_id);
4006                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4007                                 vf_id);
4008                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4009         } else {
4010                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4011                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4012                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4013         }
4014
4015         return port_number;
4016 }
4017
4018 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4019                                        __le32 *key_x, __le32 *key_y,
4020                                        struct hclge_fd_rule *rule)
4021 {
4022         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4023         u8 cur_pos = 0, tuple_size, shift_bits;
4024         int i;
4025
4026         for (i = 0; i < MAX_META_DATA; i++) {
4027                 tuple_size = meta_data_key_info[i].key_length;
4028                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4029
4030                 switch (tuple_bit) {
4031                 case BIT(ROCE_TYPE):
4032                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4033                         cur_pos += tuple_size;
4034                         break;
4035                 case BIT(DST_VPORT):
4036                         port_number = hclge_get_port_number(HOST_PORT, 0,
4037                                                             rule->vf_id, 0);
4038                         hnae3_set_field(meta_data,
4039                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4040                                         cur_pos, port_number);
4041                         cur_pos += tuple_size;
4042                         break;
4043                 default:
4044                         break;
4045                 }
4046         }
4047
4048         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4049         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4050         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4051
4052         *key_x = cpu_to_le32(tmp_x << shift_bits);
4053         *key_y = cpu_to_le32(tmp_y << shift_bits);
4054 }
4055
4056 /* A complete key is combined with meta data key and tuple key.
4057  * Meta data key is stored at the MSB region, and tuple key is stored at
4058  * the LSB region, unused bits will be filled 0.
4059  */
4060 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4061                             struct hclge_fd_rule *rule)
4062 {
4063         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4064         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4065         u8 *cur_key_x, *cur_key_y;
4066         int i, ret, tuple_size;
4067         u8 meta_data_region;
4068
4069         memset(key_x, 0, sizeof(key_x));
4070         memset(key_y, 0, sizeof(key_y));
4071         cur_key_x = key_x;
4072         cur_key_y = key_y;
4073
4074         for (i = 0; i < MAX_TUPLE; i++) {
4075                 bool tuple_valid;
4076                 u32 check_tuple;
4077
4078                 tuple_size = tuple_key_info[i].key_length / 8;
4079                 check_tuple = key_cfg->tuple_active & BIT(i);
4080
4081                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4082                                                      cur_key_y, rule);
4083                 if (tuple_valid) {
4084                         cur_key_x += tuple_size;
4085                         cur_key_y += tuple_size;
4086                 }
4087         }
4088
4089         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4090                         MAX_META_DATA_LENGTH / 8;
4091
4092         hclge_fd_convert_meta_data(key_cfg,
4093                                    (__le32 *)(key_x + meta_data_region),
4094                                    (__le32 *)(key_y + meta_data_region),
4095                                    rule);
4096
4097         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4098                                    true);
4099         if (ret) {
4100                 dev_err(&hdev->pdev->dev,
4101                         "fd key_y config fail, loc=%d, ret=%d\n",
4102                         rule->location, ret);
4103                 return ret;
4104         }
4105
4106         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4107                                    true);
4108         if (ret)
4109                 dev_err(&hdev->pdev->dev,
4110                         "fd key_x config fail, loc=%d, ret=%d\n",
4111                         rule->location, ret);
4112         return ret;
4113 }
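
/* Illustrative sketch, not part of the driver: with a 400-bit (50-byte) key
 * and a 32-bit meta data region (assuming MAX_META_DATA_LENGTH == 32, in
 * bits), the meta data starts at byte 50 - 32 / 8 = 46, so tuples occupy
 * key[0..45] and meta data key[46..49], matching the MSB/LSB layout
 * described above. Hypothetical helper name:
 */
static inline u8 hclge_meta_data_offset(u32 max_key_length_bits)
{
        return max_key_length_bits / 8 - MAX_META_DATA_LENGTH / 8;
}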
4114
4115 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4116                                struct hclge_fd_rule *rule)
4117 {
4118         struct hclge_fd_ad_data ad_data;
4119
4120         ad_data.ad_id = rule->location;
4121
4122         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4123                 ad_data.drop_packet = true;
4124                 ad_data.forward_to_direct_queue = false;
4125                 ad_data.queue_id = 0;
4126         } else {
4127                 ad_data.drop_packet = false;
4128                 ad_data.forward_to_direct_queue = true;
4129                 ad_data.queue_id = rule->queue_id;
4130         }
4131
4132         ad_data.use_counter = false;
4133         ad_data.counter_id = 0;
4134
4135         ad_data.use_next_stage = false;
4136         ad_data.next_input_key = 0;
4137
4138         ad_data.write_rule_id_to_bd = true;
4139         ad_data.rule_id = rule->location;
4140
4141         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4142 }
4143
4144 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4145                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4146 {
4147         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4148         struct ethtool_usrip4_spec *usr_ip4_spec;
4149         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4150         struct ethtool_usrip6_spec *usr_ip6_spec;
4151         struct ethhdr *ether_spec;
4152
4153         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4154                 return -EINVAL;
4155
4156         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4157                 return -EOPNOTSUPP;
4158
4159         if ((fs->flow_type & FLOW_EXT) &&
4160             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4161                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4162                 return -EOPNOTSUPP;
4163         }
4164
4165         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4166         case SCTP_V4_FLOW:
4167         case TCP_V4_FLOW:
4168         case UDP_V4_FLOW:
4169                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4170                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4171
4172                 if (!tcp_ip4_spec->ip4src)
4173                         *unused |= BIT(INNER_SRC_IP);
4174
4175                 if (!tcp_ip4_spec->ip4dst)
4176                         *unused |= BIT(INNER_DST_IP);
4177
4178                 if (!tcp_ip4_spec->psrc)
4179                         *unused |= BIT(INNER_SRC_PORT);
4180
4181                 if (!tcp_ip4_spec->pdst)
4182                         *unused |= BIT(INNER_DST_PORT);
4183
4184                 if (!tcp_ip4_spec->tos)
4185                         *unused |= BIT(INNER_IP_TOS);
4186
4187                 break;
4188         case IP_USER_FLOW:
4189                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4190                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4191                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4192
4193                 if (!usr_ip4_spec->ip4src)
4194                         *unused |= BIT(INNER_SRC_IP);
4195
4196                 if (!usr_ip4_spec->ip4dst)
4197                         *unused |= BIT(INNER_DST_IP);
4198
4199                 if (!usr_ip4_spec->tos)
4200                         *unused |= BIT(INNER_IP_TOS);
4201
4202                 if (!usr_ip4_spec->proto)
4203                         *unused |= BIT(INNER_IP_PROTO);
4204
4205                 if (usr_ip4_spec->l4_4_bytes)
4206                         return -EOPNOTSUPP;
4207
4208                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4209                         return -EOPNOTSUPP;
4210
4211                 break;
4212         case SCTP_V6_FLOW:
4213         case TCP_V6_FLOW:
4214         case UDP_V6_FLOW:
4215                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4216                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4217                         BIT(INNER_IP_TOS);
4218
4219                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4220                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4221                         *unused |= BIT(INNER_SRC_IP);
4222
4223                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4224                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4225                         *unused |= BIT(INNER_DST_IP);
4226
4227                 if (!tcp_ip6_spec->psrc)
4228                         *unused |= BIT(INNER_SRC_PORT);
4229
4230                 if (!tcp_ip6_spec->pdst)
4231                         *unused |= BIT(INNER_DST_PORT);
4232
4233                 if (tcp_ip6_spec->tclass)
4234                         return -EOPNOTSUPP;
4235
4236                 break;
4237         case IPV6_USER_FLOW:
4238                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4239                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4240                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4241                         BIT(INNER_DST_PORT);
4242
4243                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4244                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4245                         *unused |= BIT(INNER_SRC_IP);
4246
4247                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4248                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4249                         *unused |= BIT(INNER_DST_IP);
4250
4251                 if (!usr_ip6_spec->l4_proto)
4252                         *unused |= BIT(INNER_IP_PROTO);
4253
4254                 if (usr_ip6_spec->tclass)
4255                         return -EOPNOTSUPP;
4256
4257                 if (usr_ip6_spec->l4_4_bytes)
4258                         return -EOPNOTSUPP;
4259
4260                 break;
4261         case ETHER_FLOW:
4262                 ether_spec = &fs->h_u.ether_spec;
4263                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4264                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4265                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4266
4267                 if (is_zero_ether_addr(ether_spec->h_source))
4268                         *unused |= BIT(INNER_SRC_MAC);
4269
4270                 if (is_zero_ether_addr(ether_spec->h_dest))
4271                         *unused |= BIT(INNER_DST_MAC);
4272
4273                 if (!ether_spec->h_proto)
4274                         *unused |= BIT(INNER_ETH_TYPE);
4275
4276                 break;
4277         default:
4278                 return -EOPNOTSUPP;
4279         }
4280
4281         if (fs->flow_type & FLOW_EXT) {
4282                 if (fs->h_ext.vlan_etype)
4283                         return -EOPNOTSUPP;
4284                 if (!fs->h_ext.vlan_tci)
4285                         *unused |= BIT(INNER_VLAN_TAG_FST);
4286
4287                 if (fs->m_ext.vlan_tci) {
4288                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4289                                 return -EINVAL;
4290                 }
4291         } else {
4292                 *unused |= BIT(INNER_VLAN_TAG_FST);
4293         }
4294
4295         if (fs->flow_type & FLOW_MAC_EXT) {
4296                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4297                         return -EOPNOTSUPP;
4298
4299                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4300                         *unused |= BIT(INNER_DST_MAC);
4301                 else
4302                         *unused &= ~(BIT(INNER_DST_MAC));
4303         }
4304
4305         return 0;
4306 }
4307
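     /* The flow director rule list is kept sorted by rule location, so the
      * walk can stop at the first entry whose location is not below the
      * requested one.
      */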
4308 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4309 {
4310         struct hclge_fd_rule *rule = NULL;
4311         struct hlist_node *node2;
4312
4313         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4314                 if (rule->location >= location)
4315                         break;
4316         }
4317
4318         return rule && rule->location == location;
4319 }
4320
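     /* Insert or remove the rule at @location while keeping the list sorted.
      * Any existing rule at @location is freed first; when @is_add is true,
      * @new_rule is then linked in behind its predecessor and the rule
      * counter is updated.
      */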
4321 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4322                                      struct hclge_fd_rule *new_rule,
4323                                      u16 location,
4324                                      bool is_add)
4325 {
4326         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4327         struct hlist_node *node2;
4328
4329         if (is_add && !new_rule)
4330                 return -EINVAL;
4331
4332         hlist_for_each_entry_safe(rule, node2,
4333                                   &hdev->fd_rule_list, rule_node) {
4334                 if (rule->location >= location)
4335                         break;
4336                 parent = rule;
4337         }
4338
4339         if (rule && rule->location == location) {
4340                 hlist_del(&rule->rule_node);
4341                 kfree(rule);
4342                 hdev->hclge_fd_rule_num--;
4343
4344                 if (!is_add)
4345                         return 0;
4346
4347         } else if (!is_add) {
4348                 dev_err(&hdev->pdev->dev,
4349                         "failed to delete rule %d, it does not exist\n",
4350                         location);
4351                 return -EINVAL;
4352         }
4353
4354         INIT_HLIST_NODE(&new_rule->rule_node);
4355
4356         if (parent)
4357                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4358         else
4359                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4360
4361         hdev->hclge_fd_rule_num++;
4362
4363         return 0;
4364 }
4365
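     /* Translate an ethtool flow spec into the driver's tuple/mask layout.
      * IPv4 addresses are stored in the last word of the 4-word IP arrays,
      * so IPv4 and IPv6 rules share the same tuple fields.
      */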
4366 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4367                               struct ethtool_rx_flow_spec *fs,
4368                               struct hclge_fd_rule *rule)
4369 {
4370         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4371
4372         switch (flow_type) {
4373         case SCTP_V4_FLOW:
4374         case TCP_V4_FLOW:
4375         case UDP_V4_FLOW:
4376                 rule->tuples.src_ip[3] =
4377                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4378                 rule->tuples_mask.src_ip[3] =
4379                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4380
4381                 rule->tuples.dst_ip[3] =
4382                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4383                 rule->tuples_mask.dst_ip[3] =
4384                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4385
4386                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4387                 rule->tuples_mask.src_port =
4388                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4389
4390                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4391                 rule->tuples_mask.dst_port =
4392                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4393
4394                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4395                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4396
4397                 rule->tuples.ether_proto = ETH_P_IP;
4398                 rule->tuples_mask.ether_proto = 0xFFFF;
4399
4400                 break;
4401         case IP_USER_FLOW:
4402                 rule->tuples.src_ip[3] =
4403                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4404                 rule->tuples_mask.src_ip[3] =
4405                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4406
4407                 rule->tuples.dst_ip[3] =
4408                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4409                 rule->tuples_mask.dst_ip[3] =
4410                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4411
4412                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4413                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4414
4415                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4416                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4417
4418                 rule->tuples.ether_proto = ETH_P_IP;
4419                 rule->tuples_mask.ether_proto = 0xFFFF;
4420
4421                 break;
4422         case SCTP_V6_FLOW:
4423         case TCP_V6_FLOW:
4424         case UDP_V6_FLOW:
4425                 be32_to_cpu_array(rule->tuples.src_ip,
4426                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4427                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4428                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4429
4430                 be32_to_cpu_array(rule->tuples.dst_ip,
4431                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4432                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4433                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4434
4435                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4436                 rule->tuples_mask.src_port =
4437                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4438
4439                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4440                 rule->tuples_mask.dst_port =
4441                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4442
4443                 rule->tuples.ether_proto = ETH_P_IPV6;
4444                 rule->tuples_mask.ether_proto = 0xFFFF;
4445
4446                 break;
4447         case IPV6_USER_FLOW:
4448                 be32_to_cpu_array(rule->tuples.src_ip,
4449                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4450                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4451                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4452
4453                 be32_to_cpu_array(rule->tuples.dst_ip,
4454                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4455                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4456                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4457
4458                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4459                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4460
4461                 rule->tuples.ether_proto = ETH_P_IPV6;
4462                 rule->tuples_mask.ether_proto = 0xFFFF;
4463
4464                 break;
4465         case ETHER_FLOW:
4466                 ether_addr_copy(rule->tuples.src_mac,
4467                                 fs->h_u.ether_spec.h_source);
4468                 ether_addr_copy(rule->tuples_mask.src_mac,
4469                                 fs->m_u.ether_spec.h_source);
4470
4471                 ether_addr_copy(rule->tuples.dst_mac,
4472                                 fs->h_u.ether_spec.h_dest);
4473                 ether_addr_copy(rule->tuples_mask.dst_mac,
4474                                 fs->m_u.ether_spec.h_dest);
4475
4476                 rule->tuples.ether_proto =
4477                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4478                 rule->tuples_mask.ether_proto =
4479                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4480
4481                 break;
4482         default:
4483                 return -EOPNOTSUPP;
4484         }
4485
4486         switch (flow_type) {
4487         case SCTP_V4_FLOW:
4488         case SCTP_V6_FLOW:
4489                 rule->tuples.ip_proto = IPPROTO_SCTP;
4490                 rule->tuples_mask.ip_proto = 0xFF;
4491                 break;
4492         case TCP_V4_FLOW:
4493         case TCP_V6_FLOW:
4494                 rule->tuples.ip_proto = IPPROTO_TCP;
4495                 rule->tuples_mask.ip_proto = 0xFF;
4496                 break;
4497         case UDP_V4_FLOW:
4498         case UDP_V6_FLOW:
4499                 rule->tuples.ip_proto = IPPROTO_UDP;
4500                 rule->tuples_mask.ip_proto = 0xFF;
4501                 break;
4502         default:
4503                 break;
4504         }
4505
4506         if (fs->flow_type & FLOW_EXT) {
4507                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4508                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4509         }
4510
4511         if (fs->flow_type & FLOW_MAC_EXT) {
4512                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4513                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4514         }
4515
4516         return 0;
4517 }
4518
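     /* ethtool flow insertion entry: validate the flow spec, resolve the
      * destination vport and queue (or the drop action), program the action
      * and key into the stage-1 TCAM, then track the rule in the sorted
      * rule list.
      */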
4519 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4520                               struct ethtool_rxnfc *cmd)
4521 {
4522         struct hclge_vport *vport = hclge_get_vport(handle);
4523         struct hclge_dev *hdev = vport->back;
4524         u16 dst_vport_id = 0, q_index = 0;
4525         struct ethtool_rx_flow_spec *fs;
4526         struct hclge_fd_rule *rule;
4527         u32 unused = 0;
4528         u8 action;
4529         int ret;
4530
4531         if (!hnae3_dev_fd_supported(hdev))
4532                 return -EOPNOTSUPP;
4533
4534         if (!hdev->fd_cfg.fd_en) {
4535                 dev_warn(&hdev->pdev->dev,
4536                          "Please enable flow director first\n");
4537                 return -EOPNOTSUPP;
4538         }
4539
4540         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4541
4542         ret = hclge_fd_check_spec(hdev, fs, &unused);
4543         if (ret) {
4544                 dev_err(&hdev->pdev->dev, "failed to check fd spec, ret = %d\n", ret);
4545                 return ret;
4546         }
4547
4548         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4549                 action = HCLGE_FD_ACTION_DROP_PACKET;
4550         } else {
4551                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4552                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4553                 u16 tqps;
4554
4555                 if (vf > hdev->num_req_vfs) {
4556                         dev_err(&hdev->pdev->dev,
4557                                 "Error: vf id (%d) > max vf num (%d)\n",
4558                                 vf, hdev->num_req_vfs);
4559                         return -EINVAL;
4560                 }
4561
4562                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4563                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4564
4565                 if (ring >= tqps) {
4566                         dev_err(&hdev->pdev->dev,
4567                                 "Error: queue id (%u) > max queue id (%u)\n",
4568                                 ring, tqps - 1);
4569                         return -EINVAL;
4570                 }
4571
4572                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4573                 q_index = ring;
4574         }
4575
4576         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4577         if (!rule)
4578                 return -ENOMEM;
4579
4580         ret = hclge_fd_get_tuple(hdev, fs, rule);
4581         if (ret)
4582                 goto free_rule;
4583
4584         rule->flow_type = fs->flow_type;
4585
4586         rule->location = fs->location;
4587         rule->unused_tuple = unused;
4588         rule->vf_id = dst_vport_id;
4589         rule->queue_id = q_index;
4590         rule->action = action;
4591
4592         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4593         if (ret)
4594                 goto free_rule;
4595
4596         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4597         if (ret)
4598                 goto free_rule;
4599
4600         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4601         if (ret)
4602                 goto free_rule;
4603
4604         return ret;
4605
4606 free_rule:
4607         kfree(rule);
4608         return ret;
4609 }
4610
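     /* ethtool flow deletion entry: clear the stage-1 TCAM entry at the
      * requested location and drop the matching node from the rule list.
      */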
4611 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4612                               struct ethtool_rxnfc *cmd)
4613 {
4614         struct hclge_vport *vport = hclge_get_vport(handle);
4615         struct hclge_dev *hdev = vport->back;
4616         struct ethtool_rx_flow_spec *fs;
4617         int ret;
4618
4619         if (!hnae3_dev_fd_supported(hdev))
4620                 return -EOPNOTSUPP;
4621
4622         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4623
4624         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4625                 return -EINVAL;
4626
4627         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4628                 dev_err(&hdev->pdev->dev,
4629                         "failed to delete rule %d, it does not exist\n",
4630                         fs->location);
4631                 return -ENOENT;
4632         }
4633
4634         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4635                                    fs->location, NULL, false);
4636         if (ret)
4637                 return ret;
4638
4639         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4640                                          false);
4641 }
4642
4643 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4644                                      bool clear_list)
4645 {
4646         struct hclge_vport *vport = hclge_get_vport(handle);
4647         struct hclge_dev *hdev = vport->back;
4648         struct hclge_fd_rule *rule;
4649         struct hlist_node *node;
4650
4651         if (!hnae3_dev_fd_supported(hdev))
4652                 return;
4653
4654         if (clear_list) {
4655                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4656                                           rule_node) {
4657                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4658                                              rule->location, NULL, false);
4659                         hlist_del(&rule->rule_node);
4660                         kfree(rule);
4661                         hdev->hclge_fd_rule_num--;
4662                 }
4663         } else {
4664                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4665                                           rule_node)
4666                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4667                                              rule->location, NULL, false);
4668         }
4669 }
4670
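     /* Replay all tracked flow director rules into hardware after a reset.
      * Rules that cannot be restored are dropped from the list with a
      * warning rather than failing the whole reset.
      */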
4671 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4672 {
4673         struct hclge_vport *vport = hclge_get_vport(handle);
4674         struct hclge_dev *hdev = vport->back;
4675         struct hclge_fd_rule *rule;
4676         struct hlist_node *node;
4677         int ret;
4678
4679         /* Return ok here, because reset error handling will check this
4680          * return value. If an error is returned here, the reset process
4681          * will fail.
4682          */
4683         if (!hnae3_dev_fd_supported(hdev))
4684                 return 0;
4685
4686         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4687                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4688                 if (!ret)
4689                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4690
4691                 if (ret) {
4692                         dev_warn(&hdev->pdev->dev,
4693                                  "Restore rule %d failed, remove it\n",
4694                                  rule->location);
4695                         hlist_del(&rule->rule_node);
4696                         kfree(rule);
4697                         hdev->hclge_fd_rule_num--;
4698                 }
4699         }
4700         return 0;
4701 }
4702
4703 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4704                                  struct ethtool_rxnfc *cmd)
4705 {
4706         struct hclge_vport *vport = hclge_get_vport(handle);
4707         struct hclge_dev *hdev = vport->back;
4708
4709         if (!hnae3_dev_fd_supported(hdev))
4710                 return -EOPNOTSUPP;
4711
4712         cmd->rule_cnt = hdev->hclge_fd_rule_num;
4713         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4714
4715         return 0;
4716 }
4717
4718 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4719                                   struct ethtool_rxnfc *cmd)
4720 {
4721         struct hclge_vport *vport = hclge_get_vport(handle);
4722         struct hclge_fd_rule *rule = NULL;
4723         struct hclge_dev *hdev = vport->back;
4724         struct ethtool_rx_flow_spec *fs;
4725         struct hlist_node *node2;
4726
4727         if (!hnae3_dev_fd_supported(hdev))
4728                 return -EOPNOTSUPP;
4729
4730         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4731
4732         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4733                 if (rule->location >= fs->location)
4734                         break;
4735         }
4736
4737         if (!rule || fs->location != rule->location)
4738                 return -ENOENT;
4739
4740         fs->flow_type = rule->flow_type;
4741         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4742         case SCTP_V4_FLOW:
4743         case TCP_V4_FLOW:
4744         case UDP_V4_FLOW:
4745                 fs->h_u.tcp_ip4_spec.ip4src =
4746                                 cpu_to_be32(rule->tuples.src_ip[3]);
4747                 fs->m_u.tcp_ip4_spec.ip4src =
4748                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4749                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4750
4751                 fs->h_u.tcp_ip4_spec.ip4dst =
4752                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4753                 fs->m_u.tcp_ip4_spec.ip4dst =
4754                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4755                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4756
4757                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4758                 fs->m_u.tcp_ip4_spec.psrc =
4759                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4760                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
4761
4762                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4763                 fs->m_u.tcp_ip4_spec.pdst =
4764                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4765                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4766
4767                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4768                 fs->m_u.tcp_ip4_spec.tos =
4769                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4770                                 0 : rule->tuples_mask.ip_tos;
4771
4772                 break;
4773         case IP_USER_FLOW:
4774                 fs->h_u.usr_ip4_spec.ip4src =
4775                                 cpu_to_be32(rule->tuples.src_ip[3]);
4776                 fs->m_u.usr_ip4_spec.ip4src =
4777                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4778                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4779
4780                 fs->h_u.usr_ip4_spec.ip4dst =
4781                                 cpu_to_be32(rule->tuples.dst_ip[3]);
4782                 fs->m_u.usr_ip4_spec.ip4dst =
4783                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
4784                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4785
4786                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4787                 fs->m_u.usr_ip4_spec.tos =
4788                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4789                                 0 : rule->tuples_mask.ip_tos;
4790
4791                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4792                 fs->m_u.usr_ip4_spec.proto =
4793                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4794                                 0 : rule->tuples_mask.ip_proto;
4795
4796                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4797
4798                 break;
4799         case SCTP_V6_FLOW:
4800         case TCP_V6_FLOW:
4801         case UDP_V6_FLOW:
4802                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4803                                   rule->tuples.src_ip, 4);
4804                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4805                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4806                 else
4807                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4808                                           rule->tuples_mask.src_ip, 4);
4809
4810                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4811                                   rule->tuples.dst_ip, 4);
4812                 if (rule->unused_tuple & BIT(INNER_DST_IP))
4813                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4814                 else
4815                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4816                                           rule->tuples_mask.dst_ip, 4);
4817
4818                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4819                 fs->m_u.tcp_ip6_spec.psrc =
4820                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4821                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
4822
4823                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4824                 fs->m_u.tcp_ip6_spec.pdst =
4825                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4826                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4827
4828                 break;
4829         case IPV6_USER_FLOW:
4830                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4831                                   rule->tuples.src_ip, 4);
4832                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4833                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4834                 else
4835                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4836                                           rule->tuples_mask.src_ip, 4);
4837
4838                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4839                                   rule->tuples.dst_ip, 4);
4840                 if (rule->unused_tuple & BIT(INNER_DST_IP))
4841                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4842                 else
4843                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4844                                           rule->tuples_mask.dst_ip, 4);
4845
4846                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4847                 fs->m_u.usr_ip6_spec.l4_proto =
4848                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4849                                 0 : rule->tuples_mask.ip_proto;
4850
4851                 break;
4852         case ETHER_FLOW:
4853                 ether_addr_copy(fs->h_u.ether_spec.h_source,
4854                                 rule->tuples.src_mac);
4855                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4856                         eth_zero_addr(fs->m_u.ether_spec.h_source);
4857                 else
4858                         ether_addr_copy(fs->m_u.ether_spec.h_source,
4859                                         rule->tuples_mask.src_mac);
4860
4861                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
4862                                 rule->tuples.dst_mac);
4863                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4864                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
4865                 else
4866                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
4867                                         rule->tuples_mask.dst_mac);
4868
4869                 fs->h_u.ether_spec.h_proto =
4870                                 cpu_to_be16(rule->tuples.ether_proto);
4871                 fs->m_u.ether_spec.h_proto =
4872                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4873                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4874
4875                 break;
4876         default:
4877                 return -EOPNOTSUPP;
4878         }
4879
4880         if (fs->flow_type & FLOW_EXT) {
4881                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4882                 fs->m_ext.vlan_tci =
4883                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4884                                 cpu_to_be16(VLAN_VID_MASK) :
4885                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
4886         }
4887
4888         if (fs->flow_type & FLOW_MAC_EXT) {
4889                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4890                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4891                         eth_zero_addr(fs->m_ext.h_dest);
4892                 else
4893                         ether_addr_copy(fs->m_ext.h_dest,
4894                                         rule->tuples_mask.dst_mac);
4895         }
4896
4897         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4898                 fs->ring_cookie = RX_CLS_FLOW_DISC;
4899         } else {
4900                 u64 vf_id;
4901
4902                 fs->ring_cookie = rule->queue_id;
4903                 vf_id = rule->vf_id;
4904                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4905                 fs->ring_cookie |= vf_id;
4906         }
4907
4908         return 0;
4909 }
4910
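     /* Fill @rule_locs with the locations of all tracked rules for
      * ethtool's "get all rules" request; -EMSGSIZE is returned when more
      * rules exist than the caller asked for.
      */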
4911 static int hclge_get_all_rules(struct hnae3_handle *handle,
4912                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
4913 {
4914         struct hclge_vport *vport = hclge_get_vport(handle);
4915         struct hclge_dev *hdev = vport->back;
4916         struct hclge_fd_rule *rule;
4917         struct hlist_node *node2;
4918         int cnt = 0;
4919
4920         if (!hnae3_dev_fd_supported(hdev))
4921                 return -EOPNOTSUPP;
4922
4923         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4924
4925         hlist_for_each_entry_safe(rule, node2,
4926                                   &hdev->fd_rule_list, rule_node) {
4927                 if (cnt == cmd->rule_cnt)
4928                         return -EMSGSIZE;
4929
4930                 rule_locs[cnt] = rule->location;
4931                 cnt++;
4932         }
4933
4934         cmd->rule_cnt = cnt;
4935
4936         return 0;
4937 }
4938
4939 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4940 {
4941         struct hclge_vport *vport = hclge_get_vport(handle);
4942         struct hclge_dev *hdev = vport->back;
4943
4944         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
4945                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
4946 }
4947
4948 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
4949 {
4950         struct hclge_vport *vport = hclge_get_vport(handle);
4951         struct hclge_dev *hdev = vport->back;
4952
4953         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4954 }
4955
4956 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
4957 {
4958         struct hclge_vport *vport = hclge_get_vport(handle);
4959         struct hclge_dev *hdev = vport->back;
4960
4961         return hdev->reset_count;
4962 }
4963
4964 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
4965 {
4966         struct hclge_vport *vport = hclge_get_vport(handle);
4967         struct hclge_dev *hdev = vport->back;
4968
4969         hdev->fd_cfg.fd_en = enable;
4970         if (!enable)
4971                 hclge_del_all_fd_entries(handle, false);
4972         else
4973                 hclge_restore_fd_entries(handle);
4974 }
4975
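     /* Enable or disable the MAC as a whole: TX/RX enable, padding, FCS
      * handling and oversize truncation are toggled together through a
      * single CONFIG_MAC_MODE command.
      */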
4976 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
4977 {
4978         struct hclge_desc desc;
4979         struct hclge_config_mac_mode_cmd *req =
4980                 (struct hclge_config_mac_mode_cmd *)desc.data;
4981         u32 loop_en = 0;
4982         int ret;
4983
4984         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
4985         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
4986         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
4987         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
4988         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
4989         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
4990         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
4991         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
4992         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
4993         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
4994         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
4995         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
4996         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
4997         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
4998         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
4999         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5000
5001         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5002         if (ret)
5003                 dev_err(&hdev->pdev->dev,
5004                         "mac enable fail, ret =%d.\n", ret);
5005 }
5006
5007 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5008 {
5009         struct hclge_config_mac_mode_cmd *req;
5010         struct hclge_desc desc;
5011         u32 loop_en;
5012         int ret;
5013
5014         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5015         /* 1 Read out the MAC mode config at first */
5016         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5017         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5018         if (ret) {
5019                 dev_err(&hdev->pdev->dev,
5020                         "mac loopback get fail, ret =%d.\n", ret);
5021                 return ret;
5022         }
5023
5024         /* 2 Then setup the loopback flag */
5025         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5026         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5027         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5028         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5029
5030         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5031
5032         /* 3 Config mac work mode with loopback flag
5033          * and its original configure parameters
5034          */
5035         hclge_cmd_reuse_desc(&desc, false);
5036         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5037         if (ret)
5038                 dev_err(&hdev->pdev->dev,
5039                         "mac loopback set fail, ret =%d.\n", ret);
5040         return ret;
5041 }
5042
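     /* Ask the firmware to set up serdes loopback, then poll the command
      * result until the DONE bit is reported or the retry budget runs out,
      * and check the SUCCESS bit before switching the MAC on or off.
      */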
5043 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5044                                      enum hnae3_loop loop_mode)
5045 {
5046 #define HCLGE_SERDES_RETRY_MS   10
5047 #define HCLGE_SERDES_RETRY_NUM  100
5048         struct hclge_serdes_lb_cmd *req;
5049         struct hclge_desc desc;
5050         int ret, i = 0;
5051         u8 loop_mode_b;
5052
5053         req = (struct hclge_serdes_lb_cmd *)desc.data;
5054         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5055
5056         switch (loop_mode) {
5057         case HNAE3_LOOP_SERIAL_SERDES:
5058                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5059                 break;
5060         case HNAE3_LOOP_PARALLEL_SERDES:
5061                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5062                 break;
5063         default:
5064                 dev_err(&hdev->pdev->dev,
5065                         "unsupported serdes loopback mode %d\n", loop_mode);
5066                 return -EOPNOTSUPP;
5067         }
5068
5069         if (en) {
5070                 req->enable = loop_mode_b;
5071                 req->mask = loop_mode_b;
5072         } else {
5073                 req->mask = loop_mode_b;
5074         }
5075
5076         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5077         if (ret) {
5078                 dev_err(&hdev->pdev->dev,
5079                         "serdes loopback set fail, ret = %d\n", ret);
5080                 return ret;
5081         }
5082
5083         do {
5084                 msleep(HCLGE_SERDES_RETRY_MS);
5085                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5086                                            true);
5087                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5088                 if (ret) {
5089                         dev_err(&hdev->pdev->dev,
5090                                 "serdes loopback get fail, ret = %d\n", ret);
5091                         return ret;
5092                 }
5093         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5094                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5095
5096         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5097                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5098                 return -EBUSY;
5099         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5100                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5101                 return -EIO;
5102         }
5103
5104         hclge_cfg_mac_mode(hdev, en);
5105         return 0;
5106 }
5107
5108 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5109                             int stream_id, bool enable)
5110 {
5111         struct hclge_desc desc;
5112         struct hclge_cfg_com_tqp_queue_cmd *req =
5113                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5114         int ret;
5115
5116         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5117         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5118         req->stream_id = cpu_to_le16(stream_id);
5119         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5120
5121         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5122         if (ret)
5123                 dev_err(&hdev->pdev->dev,
5124                         "Tqp enable fail, status =%d.\n", ret);
5125         return ret;
5126 }
5127
5128 static int hclge_set_loopback(struct hnae3_handle *handle,
5129                               enum hnae3_loop loop_mode, bool en)
5130 {
5131         struct hclge_vport *vport = hclge_get_vport(handle);
5132         struct hclge_dev *hdev = vport->back;
5133         int i, ret;
5134
5135         switch (loop_mode) {
5136         case HNAE3_LOOP_APP:
5137                 ret = hclge_set_app_loopback(hdev, en);
5138                 break;
5139         case HNAE3_LOOP_SERIAL_SERDES:
5140         case HNAE3_LOOP_PARALLEL_SERDES:
5141                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5142                 break;
5143         default:
5144                 ret = -EOPNOTSUPP;
5145                 dev_err(&hdev->pdev->dev,
5146                         "loop_mode %d is not supported\n", loop_mode);
5147                 break;
5148         }
5149
             if (ret)
                     return ret;

5150         for (i = 0; i < vport->alloc_tqps; i++) {
5151                 ret = hclge_tqp_enable(hdev, i, 0, en);
5152                 if (ret)
5153                         return ret;
5154         }
5155
5156         return 0;
5157 }
5158
5159 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5160 {
5161         struct hclge_vport *vport = hclge_get_vport(handle);
5162         struct hnae3_queue *queue;
5163         struct hclge_tqp *tqp;
5164         int i;
5165
5166         for (i = 0; i < vport->alloc_tqps; i++) {
5167                 queue = handle->kinfo.tqp[i];
5168                 tqp = container_of(queue, struct hclge_tqp, q);
5169                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5170         }
5171 }
5172
5173 static int hclge_ae_start(struct hnae3_handle *handle)
5174 {
5175         struct hclge_vport *vport = hclge_get_vport(handle);
5176         struct hclge_dev *hdev = vport->back;
5177
5178         /* mac enable */
5179         hclge_cfg_mac_mode(hdev, true);
5180         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5181         mod_timer(&hdev->service_timer, jiffies + HZ);
5182         hdev->hw.mac.link = 0;
5183
5184         /* reset tqp stats */
5185         hclge_reset_tqp_stats(handle);
5186
5187         hclge_mac_start_phy(hdev);
5188
5189         return 0;
5190 }
5191
5192 static void hclge_ae_stop(struct hnae3_handle *handle)
5193 {
5194         struct hclge_vport *vport = hclge_get_vport(handle);
5195         struct hclge_dev *hdev = vport->back;
5196
5197         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5198
5199         del_timer_sync(&hdev->service_timer);
5200         cancel_work_sync(&hdev->service_task);
5201         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5202
5203         /* If it is not PF reset, the firmware will disable the MAC,
5204          * so it only needs to stop the PHY here.
5205          */
5206         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5207             hdev->reset_type != HNAE3_FUNC_RESET) {
5208                 hclge_mac_stop_phy(hdev);
5209                 return;
5210         }
5211
5212         /* Mac disable */
5213         hclge_cfg_mac_mode(hdev, false);
5214
5215         hclge_mac_stop_phy(hdev);
5216
5217         /* reset tqp stats */
5218         hclge_reset_tqp_stats(handle);
5221         hclge_update_link_status(hdev);
5222 }
5223
5224 int hclge_vport_start(struct hclge_vport *vport)
5225 {
5226         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5227         vport->last_active_jiffies = jiffies;
5228         return 0;
5229 }
5230
5231 void hclge_vport_stop(struct hclge_vport *vport)
5232 {
5233         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5234 }
5235
5236 static int hclge_client_start(struct hnae3_handle *handle)
5237 {
5238         struct hclge_vport *vport = hclge_get_vport(handle);
5239
5240         return hclge_vport_start(vport);
5241 }
5242
5243 static void hclge_client_stop(struct hnae3_handle *handle)
5244 {
5245         struct hclge_vport *vport = hclge_get_vport(handle);
5246
5247         hclge_vport_stop(vport);
5248 }
5249
5250 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5251                                          u16 cmdq_resp, u8  resp_code,
5252                                          enum hclge_mac_vlan_tbl_opcode op)
5253 {
5254         struct hclge_dev *hdev = vport->back;
5255         int return_status = -EIO;
5256
5257         if (cmdq_resp) {
5258                 dev_err(&hdev->pdev->dev,
5259                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
5260                         cmdq_resp);
5261                 return -EIO;
5262         }
5263
5264         if (op == HCLGE_MAC_VLAN_ADD) {
5265                 if (!resp_code || resp_code == 1) {
5266                         return_status = 0;
5267                 } else if (resp_code == 2) {
5268                         return_status = -ENOSPC;
5269                         dev_err(&hdev->pdev->dev,
5270                                 "add mac addr failed for uc_overflow.\n");
5271                 } else if (resp_code == 3) {
5272                         return_status = -ENOSPC;
5273                         dev_err(&hdev->pdev->dev,
5274                                 "add mac addr failed for mc_overflow.\n");
5275                 } else {
5276                         dev_err(&hdev->pdev->dev,
5277                                 "add mac addr failed for undefined, code=%d.\n",
5278                                 resp_code);
5279                 }
5280         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5281                 if (!resp_code) {
5282                         return_status = 0;
5283                 } else if (resp_code == 1) {
5284                         return_status = -ENOENT;
5285                         dev_dbg(&hdev->pdev->dev,
5286                                 "remove mac addr failed for miss.\n");
5287                 } else {
5288                         dev_err(&hdev->pdev->dev,
5289                                 "remove mac addr failed for undefined, code=%d.\n",
5290                                 resp_code);
5291                 }
5292         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5293                 if (!resp_code) {
5294                         return_status = 0;
5295                 } else if (resp_code == 1) {
5296                         return_status = -ENOENT;
5297                         dev_dbg(&hdev->pdev->dev,
5298                                 "lookup mac addr failed for miss.\n");
5299                 } else {
5300                         dev_err(&hdev->pdev->dev,
5301                                 "lookup mac addr failed for undefined, code=%d.\n",
5302                                 resp_code);
5303                 }
5304         } else {
5305                 return_status = -EINVAL;
5306                 dev_err(&hdev->pdev->dev,
5307                         "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n",
5308                         op);
5309         }
5310
5311         return return_status;
5312 }
5313
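     /* Set or clear the bit for @vfid in the function bitmap carried by a
      * multi-descriptor command: vfid 0-191 lives in desc[1] and vfid
      * 192-255 in desc[2], with 32 function ids per 32-bit data word.
      */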
5314 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5315 {
5316         int word_num;
5317         int bit_num;
5318
5319         if (vfid > 255 || vfid < 0)
5320                 return -EIO;
5321
5322         if (vfid <= 191) {
5323                 word_num = vfid / 32;
5324                 bit_num  = vfid % 32;
5325                 if (clr)
5326                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5327                 else
5328                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5329         } else {
5330                 word_num = (vfid - 192) / 32;
5331                 bit_num  = vfid % 32;
5332                 if (clr)
5333                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5334                 else
5335                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5336         }
5337
5338         return 0;
5339 }
5340
5341 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5342 {
5343 #define HCLGE_DESC_NUMBER 3
5344 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5345         int i, j;
5346
5347         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5348                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5349                         if (desc[i].data[j])
5350                                 return false;
5351
5352         return true;
5353 }
5354
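     /* Pack a MAC address into the table entry layout: bytes 0-3 fill
      * mac_addr_hi32 from the low bits up and bytes 4-5 fill mac_addr_lo16,
      * both stored little endian.
      */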
5355 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5356                                    const u8 *addr)
5357 {
5358         const unsigned char *mac_addr = addr;
5359         u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
5360                        (mac_addr[1] << 8) | mac_addr[0];
5361         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5362
5363         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5364         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5365 }
5366
5367 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5368                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5369 {
5370         struct hclge_dev *hdev = vport->back;
5371         struct hclge_desc desc;
5372         u8 resp_code;
5373         u16 retval;
5374         int ret;
5375
5376         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5377
5378         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5379
5380         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5381         if (ret) {
5382                 dev_err(&hdev->pdev->dev,
5383                         "del mac addr failed for cmd_send, ret =%d.\n",
5384                         ret);
5385                 return ret;
5386         }
5387         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5388         retval = le16_to_cpu(desc.retval);
5389
5390         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5391                                              HCLGE_MAC_VLAN_REMOVE);
5392 }
5393
5394 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5395                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5396                                      struct hclge_desc *desc,
5397                                      bool is_mc)
5398 {
5399         struct hclge_dev *hdev = vport->back;
5400         u8 resp_code;
5401         u16 retval;
5402         int ret;
5403
5404         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5405         if (is_mc) {
5406                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5407                 memcpy(desc[0].data,
5408                        req,
5409                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5410                 hclge_cmd_setup_basic_desc(&desc[1],
5411                                            HCLGE_OPC_MAC_VLAN_ADD,
5412                                            true);
5413                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5414                 hclge_cmd_setup_basic_desc(&desc[2],
5415                                            HCLGE_OPC_MAC_VLAN_ADD,
5416                                            true);
5417                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5418         } else {
5419                 memcpy(desc[0].data,
5420                        req,
5421                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5422                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5423         }
5424         if (ret) {
5425                 dev_err(&hdev->pdev->dev,
5426                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5427                         ret);
5428                 return ret;
5429         }
5430         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5431         retval = le16_to_cpu(desc[0].retval);
5432
5433         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5434                                              HCLGE_MAC_VLAN_LKUP);
5435 }
5436
5437 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5438                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5439                                   struct hclge_desc *mc_desc)
5440 {
5441         struct hclge_dev *hdev = vport->back;
5442         int cfg_status;
5443         u8 resp_code;
5444         u16 retval;
5445         int ret;
5446
5447         if (!mc_desc) {
5448                 struct hclge_desc desc;
5449
5450                 hclge_cmd_setup_basic_desc(&desc,
5451                                            HCLGE_OPC_MAC_VLAN_ADD,
5452                                            false);
5453                 memcpy(desc.data, req,
5454                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5455                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5456                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5457                 retval = le16_to_cpu(desc.retval);
5458
5459                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5460                                                            resp_code,
5461                                                            HCLGE_MAC_VLAN_ADD);
5462         } else {
5463                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5464                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5465                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5466                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5467                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5468                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5469                 memcpy(mc_desc[0].data, req,
5470                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5471                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5472                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5473                 retval = le16_to_cpu(mc_desc[0].retval);
5474
5475                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5476                                                            resp_code,
5477                                                            HCLGE_MAC_VLAN_ADD);
5478         }
5479
5480         if (ret) {
5481                 dev_err(&hdev->pdev->dev,
5482                         "add mac addr failed for cmd_send, ret =%d.\n",
5483                         ret);
5484                 return ret;
5485         }
5486
5487         return cfg_status;
5488 }
5489
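     /* Ask the firmware for unicast MAC (UMV) table space and divide it:
      * each function (PF plus requested VFs) gets an equal private quota,
      * and one extra quota plus the division remainder forms the shared
      * pool.
      */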
5490 static int hclge_init_umv_space(struct hclge_dev *hdev)
5491 {
5492         u16 allocated_size = 0;
5493         int ret;
5494
5495         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5496                                   true);
5497         if (ret)
5498                 return ret;
5499
5500         if (allocated_size < hdev->wanted_umv_size)
5501                 dev_warn(&hdev->pdev->dev,
5502                          "failed to alloc umv space, want %u, get %u\n",
5503                          hdev->wanted_umv_size, allocated_size);
5504
5505         mutex_init(&hdev->umv_mutex);
5506         hdev->max_umv_size = allocated_size;
5507         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5508         hdev->share_umv_size = hdev->priv_umv_size +
5509                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5510
5511         return 0;
5512 }
5513
5514 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5515 {
5516         int ret;
5517
5518         if (hdev->max_umv_size > 0) {
5519                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5520                                           false);
5521                 if (ret)
5522                         return ret;
5523                 hdev->max_umv_size = 0;
5524         }
5525         mutex_destroy(&hdev->umv_mutex);
5526
5527         return 0;
5528 }
5529
5530 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5531                                u16 *allocated_size, bool is_alloc)
5532 {
5533         struct hclge_umv_spc_alc_cmd *req;
5534         struct hclge_desc desc;
5535         int ret;
5536
5537         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5538         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5539         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5540         req->space_size = cpu_to_le32(space_size);
5541
5542         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5543         if (ret) {
5544                 dev_err(&hdev->pdev->dev,
5545                         "%s umv space failed for cmd_send, ret =%d\n",
5546                         is_alloc ? "allocate" : "free", ret);
5547                 return ret;
5548         }
5549
5550         if (is_alloc && allocated_size)
5551                 *allocated_size = le32_to_cpu(desc.data[1]);
5552
5553         return 0;
5554 }
5555
5556 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5557 {
5558         struct hclge_vport *vport;
5559         int i;
5560
5561         for (i = 0; i < hdev->num_alloc_vport; i++) {
5562                 vport = &hdev->vport[i];
5563                 vport->used_umv_num = 0;
5564         }
5565
5566         mutex_lock(&hdev->umv_mutex);
5567         hdev->share_umv_size = hdev->priv_umv_size +
5568                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5569         mutex_unlock(&hdev->umv_mutex);
5570 }
5571
5572 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5573 {
5574         struct hclge_dev *hdev = vport->back;
5575         bool is_full;
5576
5577         mutex_lock(&hdev->umv_mutex);
5578         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5579                    hdev->share_umv_size == 0);
5580         mutex_unlock(&hdev->umv_mutex);
5581
5582         return is_full;
5583 }
5584
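     /* Charge or release one unicast entry for @vport: entries beyond the
      * vport's private quota are accounted against the shared pool.
      */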
5585 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5586 {
5587         struct hclge_dev *hdev = vport->back;
5588
5589         mutex_lock(&hdev->umv_mutex);
5590         if (is_free) {
5591                 if (vport->used_umv_num > hdev->priv_umv_size)
5592                         hdev->share_umv_size++;
5593                 vport->used_umv_num--;
5594         } else {
5595                 if (vport->used_umv_num >= hdev->priv_umv_size)
5596                         hdev->share_umv_size--;
5597                 vport->used_umv_num++;
5598         }
5599         mutex_unlock(&hdev->umv_mutex);
5600 }
5601
5602 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5603                              const unsigned char *addr)
5604 {
5605         struct hclge_vport *vport = hclge_get_vport(handle);
5606
5607         return hclge_add_uc_addr_common(vport, addr);
5608 }
5609
5610 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5611                              const unsigned char *addr)
5612 {
5613         struct hclge_dev *hdev = vport->back;
5614         struct hclge_mac_vlan_tbl_entry_cmd req;
5615         struct hclge_desc desc;
5616         u16 egress_port = 0;
5617         int ret;
5618
5619         /* mac addr check */
5620         if (is_zero_ether_addr(addr) ||
5621             is_broadcast_ether_addr(addr) ||
5622             is_multicast_ether_addr(addr)) {
5623                 dev_err(&hdev->pdev->dev,
5624                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5625                          addr,
5626                          is_zero_ether_addr(addr),
5627                          is_broadcast_ether_addr(addr),
5628                          is_multicast_ether_addr(addr));
5629                 return -EINVAL;
5630         }
5631
5632         memset(&req, 0, sizeof(req));
5633         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5634
5635         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5636                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5637
5638         req.egress_port = cpu_to_le16(egress_port);
5639
5640         hclge_prepare_mac_addr(&req, addr);
5641
5642         /* Look up the mac address in the mac_vlan table, and add
5643          * it if the entry does not exist. Duplicate unicast entries
5644          * are not allowed in the mac vlan table.
5645          */
5646         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5647         if (ret == -ENOENT) {
5648                 if (!hclge_is_umv_space_full(vport)) {
5649                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5650                         if (!ret)
5651                                 hclge_update_umv_space(vport, false);
5652                         return ret;
5653                 }
5654
5655                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5656                         hdev->priv_umv_size);
5657
5658                 return -ENOSPC;
5659         }
5660
5661         /* check if we just hit the duplicate */
5662         if (!ret)
5663                 ret = -EINVAL;
5664
5665         dev_err(&hdev->pdev->dev,
5666                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5667                 addr);
5668
5669         return ret;
5670 }
5671
5672 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5673                             const unsigned char *addr)
5674 {
5675         struct hclge_vport *vport = hclge_get_vport(handle);
5676
5677         return hclge_rm_uc_addr_common(vport, addr);
5678 }
5679
5680 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5681                             const unsigned char *addr)
5682 {
5683         struct hclge_dev *hdev = vport->back;
5684         struct hclge_mac_vlan_tbl_entry_cmd req;
5685         int ret;
5686
5687         /* mac addr check */
5688         if (is_zero_ether_addr(addr) ||
5689             is_broadcast_ether_addr(addr) ||
5690             is_multicast_ether_addr(addr)) {
5691                 dev_dbg(&hdev->pdev->dev,
5692                         "Remove mac err! invalid mac:%pM.\n",
5693                          addr);
5694                 return -EINVAL;
5695         }
5696
5697         memset(&req, 0, sizeof(req));
5698         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5699         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5700         hclge_prepare_mac_addr(&req, addr);
5701         ret = hclge_remove_mac_vlan_tbl(vport, &req);
5702         if (!ret)
5703                 hclge_update_umv_space(vport, true);
5704
5705         return ret;
5706 }
5707
5708 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5709                              const unsigned char *addr)
5710 {
5711         struct hclge_vport *vport = hclge_get_vport(handle);
5712
5713         return hclge_add_mc_addr_common(vport, addr);
5714 }
5715
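/* Multicast entries carry a function bitmap wide enough to span three
 * command descriptors, so lookup and add operate on a desc[3] array: on
 * a hit the existing bitmap is extended with this vport's bit, on a miss
 * a fresh entry is written with only this vport set.
 */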
5716 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5717                              const unsigned char *addr)
5718 {
5719         struct hclge_dev *hdev = vport->back;
5720         struct hclge_mac_vlan_tbl_entry_cmd req;
5721         struct hclge_desc desc[3];
5722         int status;
5723
5724         /* mac addr check */
5725         if (!is_multicast_ether_addr(addr)) {
5726                 dev_err(&hdev->pdev->dev,
5727                         "Add mc mac err! invalid mac:%pM.\n",
5728                          addr);
5729                 return -EINVAL;
5730         }
5731         memset(&req, 0, sizeof(req));
5732         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5733         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5734         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5735         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5736         hclge_prepare_mac_addr(&req, addr);
5737         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5738         if (!status) {
5739                 /* This mac addr exists, update VFID for it */
5740                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5741                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5742         } else {
5743                 /* This mac addr does not exist, add a new entry for it */
5744                 memset(desc[0].data, 0, sizeof(desc[0].data));
5745                 memset(desc[1].data, 0, sizeof(desc[0].data));
5746                 memset(desc[2].data, 0, sizeof(desc[0].data));
5747                 hclge_update_desc_vfid(desc, vport->vport_id, false);
5748                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5749         }
5750
5751         if (status == -ENOSPC)
5752                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5753
5754         return status;
5755 }
5756
5757 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5758                             const unsigned char *addr)
5759 {
5760         struct hclge_vport *vport = hclge_get_vport(handle);
5761
5762         return hclge_rm_mc_addr_common(vport, addr);
5763 }
5764
5765 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5766                             const unsigned char *addr)
5767 {
5768         struct hclge_dev *hdev = vport->back;
5769         struct hclge_mac_vlan_tbl_entry_cmd req;
5770         enum hclge_cmd_status status;
5771         struct hclge_desc desc[3];
5772
5773         /* mac addr check */
5774         if (!is_multicast_ether_addr(addr)) {
5775                 dev_dbg(&hdev->pdev->dev,
5776                         "Remove mc mac err! invalid mac:%pM.\n",
5777                          addr);
5778                 return -EINVAL;
5779         }
5780
5781         memset(&req, 0, sizeof(req));
5782         hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5783         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5784         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5785         hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5786         hclge_prepare_mac_addr(&req, addr);
5787         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5788         if (!status) {
5789                 /* This mac addr exists, remove this handle's VFID for it */
5790                 hclge_update_desc_vfid(desc, vport->vport_id, true);
5791
5792                 if (hclge_is_all_function_id_zero(desc))
5793                         /* All the vfids are zero, so delete this entry */
5794                         status = hclge_remove_mac_vlan_tbl(vport, &req);
5795                 else
5796                         /* Not all the vfids are zero, so update the entry */
5797                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5798
5799         } else {
5800                 /* This mac address may be in the mta table, but it cannot
5801                  * be deleted here because an mta entry represents an
5802                  * address range rather than a specific address. The delete
5803                  * action on all entries will take effect in
5804                  * update_mta_status, called by hns3_nic_set_rx_mode.
5805                  */
5806                 status = 0;
5807         }
5808
5809         return status;
5810 }
5811
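/* Decode the result of a MAC ethertype manager-table command: cmdq_resp
 * reports a transport failure of the command itself, while resp_code
 * carries the table-level verdict defined below.
 */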
5812 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5813                                               u16 cmdq_resp, u8 resp_code)
5814 {
5815 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
5816 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
5817 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
5818 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
5819
5820         int return_status;
5821
5822         if (cmdq_resp) {
5823                 dev_err(&hdev->pdev->dev,
5824                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5825                         cmdq_resp);
5826                 return -EIO;
5827         }
5828
5829         switch (resp_code) {
5830         case HCLGE_ETHERTYPE_SUCCESS_ADD:
5831         case HCLGE_ETHERTYPE_ALREADY_ADD:
5832                 return_status = 0;
5833                 break;
5834         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5835                 dev_err(&hdev->pdev->dev,
5836                         "add mac ethertype failed for manager table overflow.\n");
5837                 return_status = -EIO;
5838                 break;
5839         case HCLGE_ETHERTYPE_KEY_CONFLICT:
5840                 dev_err(&hdev->pdev->dev,
5841                         "add mac ethertype failed for key conflict.\n");
5842                 return_status = -EIO;
5843                 break;
5844         default:
5845                 dev_err(&hdev->pdev->dev,
5846                         "add mac ethertype failed for undefined, code=%d.\n",
5847                         resp_code);
5848                 return_status = -EIO;
5849         }
5850
5851         return return_status;
5852 }
5853
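/* Push one static entry from hclge_mgr_table into the hardware manager
 * table; the 8-bit table verdict comes back in byte 1 of desc.data[0].
 */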
5854 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5855                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
5856 {
5857         struct hclge_desc desc;
5858         u8 resp_code;
5859         u16 retval;
5860         int ret;
5861
5862         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5863         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5864
5865         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5866         if (ret) {
5867                 dev_err(&hdev->pdev->dev,
5868                         "add mac ethertype failed for cmd_send, ret =%d.\n",
5869                         ret);
5870                 return ret;
5871         }
5872
5873         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5874         retval = le16_to_cpu(desc.retval);
5875
5876         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5877 }
5878
5879 static int init_mgr_tbl(struct hclge_dev *hdev)
5880 {
5881         int ret;
5882         int i;
5883
5884         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5885                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5886                 if (ret) {
5887                         dev_err(&hdev->pdev->dev,
5888                                 "add mac ethertype failed, ret =%d.\n",
5889                                 ret);
5890                         return ret;
5891                 }
5892         }
5893
5894         return 0;
5895 }
5896
5897 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5898 {
5899         struct hclge_vport *vport = hclge_get_vport(handle);
5900         struct hclge_dev *hdev = vport->back;
5901
5902         ether_addr_copy(p, hdev->hw.mac.mac_addr);
5903 }
5904
5905 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5906                               bool is_first)
5907 {
5908         const unsigned char *new_addr = (const unsigned char *)p;
5909         struct hclge_vport *vport = hclge_get_vport(handle);
5910         struct hclge_dev *hdev = vport->back;
5911         int ret;
5912
5913         /* mac addr check */
5914         if (is_zero_ether_addr(new_addr) ||
5915             is_broadcast_ether_addr(new_addr) ||
5916             is_multicast_ether_addr(new_addr)) {
5917                 dev_err(&hdev->pdev->dev,
5918                         "Change uc mac err! invalid mac:%pM.\n",
5919                          new_addr);
5920                 return -EINVAL;
5921         }
5922
5923         if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5924                 dev_warn(&hdev->pdev->dev,
5925                          "remove old uc mac address fail.\n");
5926
5927         ret = hclge_add_uc_addr(handle, new_addr);
5928         if (ret) {
5929                 dev_err(&hdev->pdev->dev,
5930                         "add uc mac address fail, ret =%d.\n",
5931                         ret);
5932
5933                 if (!is_first &&
5934                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5935                         dev_err(&hdev->pdev->dev,
5936                                 "restore uc mac address fail.\n");
5937
5938                 return -EIO;
5939         }
5940
5941         ret = hclge_pause_addr_cfg(hdev, new_addr);
5942         if (ret) {
5943                 dev_err(&hdev->pdev->dev,
5944                         "configure mac pause address fail, ret =%d.\n",
5945                         ret);
5946                 return -EIO;
5947         }
5948
5949         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
5950
5951         return 0;
5952 }
5953
5954 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
5955                           int cmd)
5956 {
5957         struct hclge_vport *vport = hclge_get_vport(handle);
5958         struct hclge_dev *hdev = vport->back;
5959
5960         if (!hdev->hw.mac.phydev)
5961                 return -EOPNOTSUPP;
5962
5963         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
5964 }
5965
5966 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
5967                                       u8 fe_type, bool filter_en)
5968 {
5969         struct hclge_vlan_filter_ctrl_cmd *req;
5970         struct hclge_desc desc;
5971         int ret;
5972
5973         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
5974
5975         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
5976         req->vlan_type = vlan_type;
5977         req->vlan_fe = filter_en ? fe_type : 0;
5978
5979         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5980         if (ret)
5981                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
5982                         ret);
5983
5984         return ret;
5985 }
5986
5987 #define HCLGE_FILTER_TYPE_VF            0
5988 #define HCLGE_FILTER_TYPE_PORT          1
5989 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
5990 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
5991 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
5992 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
5993 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
5994 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
5995                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
5996 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
5997                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
5998
5999 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6000 {
6001         struct hclge_vport *vport = hclge_get_vport(handle);
6002         struct hclge_dev *hdev = vport->back;
6003
6004         if (hdev->pdev->revision >= 0x21) {
6005                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6006                                            HCLGE_FILTER_FE_EGRESS, enable);
6007                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6008                                            HCLGE_FILTER_FE_INGRESS, enable);
6009         } else {
6010                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6011                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6012         }
6013         if (enable)
6014                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6015         else
6016                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6017 }
6018
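/* Program a VLAN filter entry for a single VF. The per-VF bit lives in
 * a bitmap split across two descriptors of HCLGE_MAX_VF_BYTES each.
 * Worked example: vfid = 10 gives vf_byte_off = 10 / 8 = 1 and
 * vf_byte_val = 1 << (10 % 8) = 0x04, i.e. bit 2 of byte 1 in desc[0].
 */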
6019 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6020                                     bool is_kill, u16 vlan, u8 qos,
6021                                     __be16 proto)
6022 {
6023 #define HCLGE_MAX_VF_BYTES  16
6024         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6025         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6026         struct hclge_desc desc[2];
6027         u8 vf_byte_val;
6028         u8 vf_byte_off;
6029         int ret;
6030
6031         hclge_cmd_setup_basic_desc(&desc[0],
6032                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6033         hclge_cmd_setup_basic_desc(&desc[1],
6034                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6035
6036         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6037
6038         vf_byte_off = vfid / 8;
6039         vf_byte_val = 1 << (vfid % 8);
6040
6041         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6042         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6043
6044         req0->vlan_id  = cpu_to_le16(vlan);
6045         req0->vlan_cfg = is_kill;
6046
6047         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6048                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6049         else
6050                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6051
6052         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6053         if (ret) {
6054                 dev_err(&hdev->pdev->dev,
6055                         "Send vf vlan command fail, ret =%d.\n",
6056                         ret);
6057                 return ret;
6058         }
6059
6060         if (!is_kill) {
6061 #define HCLGE_VF_VLAN_NO_ENTRY  2
6062                 if (!req0->resp_code || req0->resp_code == 1)
6063                         return 0;
6064
6065                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6066                         dev_warn(&hdev->pdev->dev,
6067                                  "vf vlan table is full, vf vlan filter is disabled\n");
6068                         return 0;
6069                 }
6070
6071                 dev_err(&hdev->pdev->dev,
6072                         "Add vf vlan filter fail, ret =%d.\n",
6073                         req0->resp_code);
6074         } else {
6075 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6076                 if (!req0->resp_code)
6077                         return 0;
6078
6079                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6080                         dev_warn(&hdev->pdev->dev,
6081                                  "vlan %d filter is not in vf vlan table\n",
6082                                  vlan);
6083                         return 0;
6084                 }
6085
6086                 dev_err(&hdev->pdev->dev,
6087                         "Kill vf vlan filter fail, ret =%d.\n",
6088                         req0->resp_code);
6089         }
6090
6091         return -EIO;
6092 }
6093
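/* Program the port-level VLAN filter. The 4K VLAN ID space is carved
 * into 160-ID chunks selected by vlan_offset, then a byte and a bit
 * inside the chunk's bitmap. Worked example: vlan_id = 1000 gives
 * vlan_offset_160 = 6, vlan_offset_byte = (1000 % 160) / 8 = 5 and a
 * bit value of 1 << (1000 % 8) = 0x01.
 */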
6094 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6095                                       u16 vlan_id, bool is_kill)
6096 {
6097         struct hclge_vlan_filter_pf_cfg_cmd *req;
6098         struct hclge_desc desc;
6099         u8 vlan_offset_byte_val;
6100         u8 vlan_offset_byte;
6101         u8 vlan_offset_160;
6102         int ret;
6103
6104         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6105
6106         vlan_offset_160 = vlan_id / 160;
6107         vlan_offset_byte = (vlan_id % 160) / 8;
6108         vlan_offset_byte_val = 1 << (vlan_id % 8);
6109
6110         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6111         req->vlan_offset = vlan_offset_160;
6112         req->vlan_cfg = is_kill;
6113         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6114
6115         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6116         if (ret)
6117                 dev_err(&hdev->pdev->dev,
6118                         "port vlan command, send fail, ret =%d.\n", ret);
6119         return ret;
6120 }
6121
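/* Apply a VLAN add or kill for one vport at both filter levels: always
 * in the VF table, but in the shared port table only when this vport is
 * the first user of the VLAN (add) or the last one leaving it (kill),
 * as tracked by the hdev->vlan_table bitmaps.
 */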
6122 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6123                                     u16 vport_id, u16 vlan_id, u8 qos,
6124                                     bool is_kill)
6125 {
6126         u16 vport_idx, vport_num = 0;
6127         int ret;
6128
6129         if (is_kill && !vlan_id)
6130                 return 0;
6131
6132         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6133                                        0, proto);
6134         if (ret) {
6135                 dev_err(&hdev->pdev->dev,
6136                         "Set %d vport vlan filter config fail, ret =%d.\n",
6137                         vport_id, ret);
6138                 return ret;
6139         }
6140
6141         /* vlan 0 may be added twice when the 8021q module is enabled */
6142         if (!is_kill && !vlan_id &&
6143             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6144                 return 0;
6145
6146         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6147                 dev_err(&hdev->pdev->dev,
6148                         "Add port vlan failed, vport %d is already in vlan %d\n",
6149                         vport_id, vlan_id);
6150                 return -EINVAL;
6151         }
6152
6153         if (is_kill &&
6154             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6155                 dev_err(&hdev->pdev->dev,
6156                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6157                         vport_id, vlan_id);
6158                 return -EINVAL;
6159         }
6160
6161         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6162                 vport_num++;
6163
6164         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6165                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6166                                                  is_kill);
6167
6168         return ret;
6169 }
6170
6171 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6172                           u16 vlan_id, bool is_kill)
6173 {
6174         struct hclge_vport *vport = hclge_get_vport(handle);
6175         struct hclge_dev *hdev = vport->back;
6176
6177         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6178                                         0, is_kill);
6179 }
6180
6181 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6182                                     u16 vlan, u8 qos, __be16 proto)
6183 {
6184         struct hclge_vport *vport = hclge_get_vport(handle);
6185         struct hclge_dev *hdev = vport->back;
6186
6187         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6188                 return -EINVAL;
6189         if (proto != htons(ETH_P_8021Q))
6190                 return -EPROTONOSUPPORT;
6191
6192         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6193 }
6194
6195 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6196 {
6197         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6198         struct hclge_vport_vtag_tx_cfg_cmd *req;
6199         struct hclge_dev *hdev = vport->back;
6200         struct hclge_desc desc;
6201         int status;
6202
6203         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6204
6205         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6206         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6207         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6208         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6209                       vcfg->accept_tag1 ? 1 : 0);
6210         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6211                       vcfg->accept_untag1 ? 1 : 0);
6212         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6213                       vcfg->accept_tag2 ? 1 : 0);
6214         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6215                       vcfg->accept_untag2 ? 1 : 0);
6216         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6217                       vcfg->insert_tag1_en ? 1 : 0);
6218         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6219                       vcfg->insert_tag2_en ? 1 : 0);
6220         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6221
6222         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6223         req->vf_bitmap[req->vf_offset] =
6224                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6225
6226         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6227         if (status)
6228                 dev_err(&hdev->pdev->dev,
6229                         "Send port txvlan cfg command fail, ret =%d\n",
6230                         status);
6231
6232         return status;
6233 }
6234
6235 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6236 {
6237         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6238         struct hclge_vport_vtag_rx_cfg_cmd *req;
6239         struct hclge_dev *hdev = vport->back;
6240         struct hclge_desc desc;
6241         int status;
6242
6243         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6244
6245         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6246         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6247                       vcfg->strip_tag1_en ? 1 : 0);
6248         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6249                       vcfg->strip_tag2_en ? 1 : 0);
6250         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6251                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6252         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6253                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6254
6255         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6256         req->vf_bitmap[req->vf_offset] =
6257                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6258
6259         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6260         if (status)
6261                 dev_err(&hdev->pdev->dev,
6262                         "Send port rxvlan cfg command fail, ret =%d\n",
6263                         status);
6264
6265         return status;
6266 }
6267
6268 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6269 {
6270         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6271         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6272         struct hclge_desc desc;
6273         int status;
6274
6275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6276         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6277         rx_req->ot_fst_vlan_type =
6278                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6279         rx_req->ot_sec_vlan_type =
6280                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6281         rx_req->in_fst_vlan_type =
6282                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6283         rx_req->in_sec_vlan_type =
6284                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6285
6286         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6287         if (status) {
6288                 dev_err(&hdev->pdev->dev,
6289                         "Send rxvlan protocol type command fail, ret =%d\n",
6290                         status);
6291                 return status;
6292         }
6293
6294         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6295
6296         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6297         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6298         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6299
6300         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6301         if (status)
6302                 dev_err(&hdev->pdev->dev,
6303                         "Send txvlan protocol type command fail, ret =%d\n",
6304                         status);
6305
6306         return status;
6307 }
6308
6309 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6310 {
6311 #define HCLGE_DEF_VLAN_TYPE             0x8100
6312
6313         struct hnae3_handle *handle = &hdev->vport[0].nic;
6314         struct hclge_vport *vport;
6315         int ret;
6316         int i;
6317
6318         if (hdev->pdev->revision >= 0x21) {
6319                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6320                                                  HCLGE_FILTER_FE_EGRESS, true);
6321                 if (ret)
6322                         return ret;
6323
6324                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6325                                                  HCLGE_FILTER_FE_INGRESS, true);
6326                 if (ret)
6327                         return ret;
6328         } else {
6329                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6330                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6331                                                  true);
6332                 if (ret)
6333                         return ret;
6334         }
6335
6336         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6337
6338         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6339         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6340         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6341         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6342         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6343         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6344
6345         ret = hclge_set_vlan_protocol_type(hdev);
6346         if (ret)
6347                 return ret;
6348
6349         for (i = 0; i < hdev->num_alloc_vport; i++) {
6350                 vport = &hdev->vport[i];
6351                 vport->txvlan_cfg.accept_tag1 = true;
6352                 vport->txvlan_cfg.accept_untag1 = true;
6353
6354                 /* accept_tag2 and accept_untag2 are not supported on
6355                  * pdev revision(0x20); newer revisions support them. The
6356                  * firmware on revision(0x20) accepts these two fields
6357                  * without returning an error.
6358                  * These two fields cannot be configured by the user.
6359                  */
6360                 vport->txvlan_cfg.accept_tag2 = true;
6361                 vport->txvlan_cfg.accept_untag2 = true;
6362
6363                 vport->txvlan_cfg.insert_tag1_en = false;
6364                 vport->txvlan_cfg.insert_tag2_en = false;
6365                 vport->txvlan_cfg.default_tag1 = 0;
6366                 vport->txvlan_cfg.default_tag2 = 0;
6367
6368                 ret = hclge_set_vlan_tx_offload_cfg(vport);
6369                 if (ret)
6370                         return ret;
6371
6372                 vport->rxvlan_cfg.strip_tag1_en = false;
6373                 vport->rxvlan_cfg.strip_tag2_en = true;
6374                 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6375                 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6376
6377                 ret = hclge_set_vlan_rx_offload_cfg(vport);
6378                 if (ret)
6379                         return ret;
6380         }
6381
6382         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6383 }
6384
6385 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6386 {
6387         struct hclge_vport *vport = hclge_get_vport(handle);
6388
6389         vport->rxvlan_cfg.strip_tag1_en = false;
6390         vport->rxvlan_cfg.strip_tag2_en = enable;
6391         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6392         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6393
6394         return hclge_set_vlan_rx_offload_cfg(vport);
6395 }
6396
6397 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6398 {
6399         struct hclge_config_max_frm_size_cmd *req;
6400         struct hclge_desc desc;
6401
6402         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6403
6404         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6405         req->max_frm_size = cpu_to_le16(new_mps);
6406         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6407
6408         return hclge_cmd_send(&hdev->hw, &desc, 1);
6409 }
6410
6411 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6412 {
6413         struct hclge_vport *vport = hclge_get_vport(handle);
6414
6415         return hclge_set_vport_mtu(vport, new_mtu);
6416 }
6417
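/* Convert an MTU into a max frame size and apply it. With the standard
 * 1500-byte MTU: 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (2 * 4) = 1526 bytes, then rounded up to at least
 * HCLGE_MAC_DEFAULT_FRAME. A VF vport only updates the software
 * bookkeeping; the PF additionally reprograms the MAC and re-allocates
 * packet buffers with the client brought down.
 */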
6418 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6419 {
6420         struct hclge_dev *hdev = vport->back;
6421         int i, max_frm_size, ret = 0;
6422
6423         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6424         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6425             max_frm_size > HCLGE_MAC_MAX_FRAME)
6426                 return -EINVAL;
6427
6428         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6429         mutex_lock(&hdev->vport_lock);
6430         /* VF's mps must fit within hdev->mps */
6431         if (vport->vport_id && max_frm_size > hdev->mps) {
6432                 mutex_unlock(&hdev->vport_lock);
6433                 return -EINVAL;
6434         } else if (vport->vport_id) {
6435                 vport->mps = max_frm_size;
6436                 mutex_unlock(&hdev->vport_lock);
6437                 return 0;
6438         }
6439
6440         /* PF's mps must be no smaller than any VF's mps */
6441         for (i = 1; i < hdev->num_alloc_vport; i++)
6442                 if (max_frm_size < hdev->vport[i].mps) {
6443                         mutex_unlock(&hdev->vport_lock);
6444                         return -EINVAL;
6445                 }
6446
6447         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6448
6449         ret = hclge_set_mac_mtu(hdev, max_frm_size);
6450         if (ret) {
6451                 dev_err(&hdev->pdev->dev,
6452                         "Change mtu fail, ret =%d\n", ret);
6453                 goto out;
6454         }
6455
6456         hdev->mps = max_frm_size;
6457         vport->mps = max_frm_size;
6458
6459         ret = hclge_buffer_alloc(hdev);
6460         if (ret)
6461                 dev_err(&hdev->pdev->dev,
6462                         "Allocate buffer fail, ret =%d\n", ret);
6463
6464 out:
6465         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6466         mutex_unlock(&hdev->vport_lock);
6467         return ret;
6468 }
6469
6470 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6471                                     bool enable)
6472 {
6473         struct hclge_reset_tqp_queue_cmd *req;
6474         struct hclge_desc desc;
6475         int ret;
6476
6477         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6478
6479         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6480         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6481         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6482
6483         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6484         if (ret) {
6485                 dev_err(&hdev->pdev->dev,
6486                         "Send tqp reset cmd error, status =%d\n", ret);
6487                 return ret;
6488         }
6489
6490         return 0;
6491 }
6492
6493 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6494 {
6495         struct hclge_reset_tqp_queue_cmd *req;
6496         struct hclge_desc desc;
6497         int ret;
6498
6499         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6500
6501         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6502         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6503
6504         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6505         if (ret) {
6506                 dev_err(&hdev->pdev->dev,
6507                         "Get reset status error, status =%d\n", ret);
6508                 return ret;
6509         }
6510
6511         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6512 }
6513
6514 static u16 hclge_convert_handle_qid_global(struct hnae3_handle *handle,
6515                                            u16 queue_id)
6516 {
6517         struct hnae3_queue *queue;
6518         struct hclge_tqp *tqp;
6519
6520         queue = handle->kinfo.tqp[queue_id];
6521         tqp = container_of(queue, struct hclge_tqp, q);
6522
6523         return tqp->index;
6524 }
6525
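/* Reset one TQP from the NIC client's view: disable the queue, assert
 * the per-queue soft reset, poll the ready flag every 20 ms for up to
 * HCLGE_TQP_RESET_TRY_TIMES attempts, then deassert the reset.
 */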
6526 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6527 {
6528         struct hclge_vport *vport = hclge_get_vport(handle);
6529         struct hclge_dev *hdev = vport->back;
6530         int reset_try_times = 0;
6531         int reset_status;
6532         u16 queue_gid;
6533         int ret = 0;
6534
6535         queue_gid = hclge_convert_handle_qid_global(handle, queue_id);
6536
6537         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6538         if (ret) {
6539                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6540                 return ret;
6541         }
6542
6543         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6544         if (ret) {
6545                 dev_err(&hdev->pdev->dev,
6546                         "Send reset tqp cmd fail, ret = %d\n", ret);
6547                 return ret;
6548         }
6549
6550         reset_try_times = 0;
6551         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6552                 /* Wait for tqp hw reset */
6553                 msleep(20);
6554                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6555                 if (reset_status)
6556                         break;
6557         }
6558
6559         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6560                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6561                 return -ETIME;
6562         }
6563
6564         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6565         if (ret)
6566                 dev_err(&hdev->pdev->dev,
6567                         "Deassert the soft reset fail, ret = %d\n", ret);
6568
6569         return ret;
6570 }
6571
6572 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6573 {
6574         struct hclge_dev *hdev = vport->back;
6575         int reset_try_times = 0;
6576         int reset_status;
6577         u16 queue_gid;
6578         int ret;
6579
6580         queue_gid = hclge_convert_handle_qid_global(&vport->nic, queue_id);
6581
6582         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6583         if (ret) {
6584                 dev_warn(&hdev->pdev->dev,
6585                          "Send reset tqp cmd fail, ret = %d\n", ret);
6586                 return;
6587         }
6588
6589         reset_try_times = 0;
6590         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6591                 /* Wait for tqp hw reset */
6592                 msleep(20);
6593                 reset_status = hclge_get_reset_status(hdev, queue_gid);
6594                 if (reset_status)
6595                         break;
6596         }
6597
6598         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6599                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6600                 return;
6601         }
6602
6603         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6604         if (ret)
6605                 dev_warn(&hdev->pdev->dev,
6606                          "Deassert the soft reset fail, ret = %d\n", ret);
6607 }
6608
6609 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6610 {
6611         struct hclge_vport *vport = hclge_get_vport(handle);
6612         struct hclge_dev *hdev = vport->back;
6613
6614         return hdev->fw_version;
6615 }
6616
6617 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6618 {
6619         struct phy_device *phydev = hdev->hw.mac.phydev;
6620
6621         if (!phydev)
6622                 return;
6623
6624         phy_set_asym_pause(phydev, rx_en, tx_en);
6625 }
6626
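/* Record the requested pause mode and push it to the MAC. While PFC is
 * active the link-level setting is only cached in fc_mode_last_time, so
 * it can be restored once PFC is turned off again.
 */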
6627 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6628 {
6629         int ret;
6630
6631         if (rx_en && tx_en)
6632                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6633         else if (rx_en && !tx_en)
6634                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6635         else if (!rx_en && tx_en)
6636                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6637         else
6638                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6639
6640         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6641                 return 0;
6642
6643         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6644         if (ret) {
6645                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6646                         ret);
6647                 return ret;
6648         }
6649
6650         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6651
6652         return 0;
6653 }
6654
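/* Resolve the negotiated pause configuration from the local PHY
 * advertisement and the link partner's abilities, then apply it. Pause
 * is meaningless at half duplex, so both directions are forced off
 * there.
 */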
6655 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6656 {
6657         struct phy_device *phydev = hdev->hw.mac.phydev;
6658         u16 remote_advertising = 0;
6659         u16 local_advertising = 0;
6660         u32 rx_pause, tx_pause;
6661         u8 flowctl;
6662
6663         if (!phydev->link || !phydev->autoneg)
6664                 return 0;
6665
6666         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6667
6668         if (phydev->pause)
6669                 remote_advertising = LPA_PAUSE_CAP;
6670
6671         if (phydev->asym_pause)
6672                 remote_advertising |= LPA_PAUSE_ASYM;
6673
6674         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6675                                            remote_advertising);
6676         tx_pause = flowctl & FLOW_CTRL_TX;
6677         rx_pause = flowctl & FLOW_CTRL_RX;
6678
6679         if (phydev->duplex == HCLGE_MAC_HALF) {
6680                 tx_pause = 0;
6681                 rx_pause = 0;
6682         }
6683
6684         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6685 }
6686
6687 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6688                                  u32 *rx_en, u32 *tx_en)
6689 {
6690         struct hclge_vport *vport = hclge_get_vport(handle);
6691         struct hclge_dev *hdev = vport->back;
6692
6693         *auto_neg = hclge_get_autoneg(handle);
6694
6695         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6696                 *rx_en = 0;
6697                 *tx_en = 0;
6698                 return;
6699         }
6700
6701         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6702                 *rx_en = 1;
6703                 *tx_en = 0;
6704         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6705                 *tx_en = 1;
6706                 *rx_en = 0;
6707         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6708                 *rx_en = 1;
6709                 *tx_en = 1;
6710         } else {
6711                 *rx_en = 0;
6712                 *tx_en = 0;
6713         }
6714 }
6715
6716 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6717                                 u32 rx_en, u32 tx_en)
6718 {
6719         struct hclge_vport *vport = hclge_get_vport(handle);
6720         struct hclge_dev *hdev = vport->back;
6721         struct phy_device *phydev = hdev->hw.mac.phydev;
6722         u32 fc_autoneg;
6723
6724         fc_autoneg = hclge_get_autoneg(handle);
6725         if (auto_neg != fc_autoneg) {
6726                 dev_info(&hdev->pdev->dev,
6727                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6728                 return -EOPNOTSUPP;
6729         }
6730
6731         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6732                 dev_info(&hdev->pdev->dev,
6733                          "Priority flow control enabled. Cannot set link flow control.\n");
6734                 return -EOPNOTSUPP;
6735         }
6736
6737         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6738
6739         if (!fc_autoneg)
6740                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6741
6742         /* Only support flow control negotiation for netdev with
6743          * phy attached for now.
6744          */
6745         if (!phydev)
6746                 return -EOPNOTSUPP;
6747
6748         return phy_start_aneg(phydev);
6749 }
6750
6751 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6752                                           u8 *auto_neg, u32 *speed, u8 *duplex)
6753 {
6754         struct hclge_vport *vport = hclge_get_vport(handle);
6755         struct hclge_dev *hdev = vport->back;
6756
6757         if (speed)
6758                 *speed = hdev->hw.mac.speed;
6759         if (duplex)
6760                 *duplex = hdev->hw.mac.duplex;
6761         if (auto_neg)
6762                 *auto_neg = hdev->hw.mac.autoneg;
6763 }
6764
6765 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6766 {
6767         struct hclge_vport *vport = hclge_get_vport(handle);
6768         struct hclge_dev *hdev = vport->back;
6769
6770         if (media_type)
6771                 *media_type = hdev->hw.mac.media_type;
6772 }
6773
6774 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6775                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
6776 {
6777         struct hclge_vport *vport = hclge_get_vport(handle);
6778         struct hclge_dev *hdev = vport->back;
6779         struct phy_device *phydev = hdev->hw.mac.phydev;
6780         int mdix_ctrl, mdix, retval, is_resolved;
6781
6782         if (!phydev) {
6783                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6784                 *tp_mdix = ETH_TP_MDI_INVALID;
6785                 return;
6786         }
6787
6788         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6789
6790         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6791         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6792                                     HCLGE_PHY_MDIX_CTRL_S);
6793
6794         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6795         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6796         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6797
6798         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
6799
6800         switch (mdix_ctrl) {
6801         case 0x0:
6802                 *tp_mdix_ctrl = ETH_TP_MDI;
6803                 break;
6804         case 0x1:
6805                 *tp_mdix_ctrl = ETH_TP_MDI_X;
6806                 break;
6807         case 0x3:
6808                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
6809                 break;
6810         default:
6811                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6812                 break;
6813         }
6814
6815         if (!is_resolved)
6816                 *tp_mdix = ETH_TP_MDI_INVALID;
6817         else if (mdix)
6818                 *tp_mdix = ETH_TP_MDI_X;
6819         else
6820                 *tp_mdix = ETH_TP_MDI;
6821 }
6822
6823 static int hclge_init_instance_hw(struct hclge_dev *hdev)
6824 {
6825         return hclge_mac_connect_phy(hdev);
6826 }
6827
6828 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
6829 {
6830         hclge_mac_disconnect_phy(hdev);
6831 }
6832
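/* Bind a registering client to every vport: KNIC/UNIC clients become
 * the NIC client of the vport, while the RoCE client is only
 * initialised once both a RoCE and a NIC client are present and the
 * hardware advertises RoCE support.
 */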
6833 static int hclge_init_client_instance(struct hnae3_client *client,
6834                                       struct hnae3_ae_dev *ae_dev)
6835 {
6836         struct hclge_dev *hdev = ae_dev->priv;
6837         struct hclge_vport *vport;
6838         int i, ret;
6839
6840         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6841                 vport = &hdev->vport[i];
6842
6843                 switch (client->type) {
6844                 case HNAE3_CLIENT_KNIC:
6846                         hdev->nic_client = client;
6847                         vport->nic.client = client;
6848                         ret = client->ops->init_instance(&vport->nic);
6849                         if (ret)
6850                                 goto clear_nic;
6851
6852                         ret = hclge_init_instance_hw(hdev);
6853                         if (ret) {
6854                                 client->ops->uninit_instance(&vport->nic,
6855                                                              0);
6856                                 goto clear_nic;
6857                         }
6858
6859                         hnae3_set_client_init_flag(client, ae_dev, 1);
6860
6861                         if (hdev->roce_client &&
6862                             hnae3_dev_roce_supported(hdev)) {
6863                                 struct hnae3_client *rc = hdev->roce_client;
6864
6865                                 ret = hclge_init_roce_base_info(vport);
6866                                 if (ret)
6867                                         goto clear_roce;
6868
6869                                 ret = rc->ops->init_instance(&vport->roce);
6870                                 if (ret)
6871                                         goto clear_roce;
6872
6873                                 hnae3_set_client_init_flag(hdev->roce_client,
6874                                                            ae_dev, 1);
6875                         }
6876
6877                         break;
6878                 case HNAE3_CLIENT_UNIC:
6879                         hdev->nic_client = client;
6880                         vport->nic.client = client;
6881
6882                         ret = client->ops->init_instance(&vport->nic);
6883                         if (ret)
6884                                 goto clear_nic;
6885
6886                         hnae3_set_client_init_flag(client, ae_dev, 1);
6887
6888                         break;
6889                 case HNAE3_CLIENT_ROCE:
6890                         if (hnae3_dev_roce_supported(hdev)) {
6891                                 hdev->roce_client = client;
6892                                 vport->roce.client = client;
6893                         }
6894
6895                         if (hdev->roce_client && hdev->nic_client) {
6896                                 ret = hclge_init_roce_base_info(vport);
6897                                 if (ret)
6898                                         goto clear_roce;
6899
6900                                 ret = client->ops->init_instance(&vport->roce);
6901                                 if (ret)
6902                                         goto clear_roce;
6903
6904                                 hnae3_set_client_init_flag(client, ae_dev, 1);
6905                         }
6906
6907                         break;
6908                 default:
6909                         return -EINVAL;
6910                 }
6911         }
6912
6913         return 0;
6914
6915 clear_nic:
6916         hdev->nic_client = NULL;
6917         vport->nic.client = NULL;
6918         return ret;
6919 clear_roce:
6920         hdev->roce_client = NULL;
6921         vport->roce.client = NULL;
6922         return ret;
6923 }
6924
6925 static void hclge_uninit_client_instance(struct hnae3_client *client,
6926                                          struct hnae3_ae_dev *ae_dev)
6927 {
6928         struct hclge_dev *hdev = ae_dev->priv;
6929         struct hclge_vport *vport;
6930         int i;
6931
6932         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
6933                 vport = &hdev->vport[i];
6934                 if (hdev->roce_client) {
6935                         hdev->roce_client->ops->uninit_instance(&vport->roce,
6936                                                                 0);
6937                         hdev->roce_client = NULL;
6938                         vport->roce.client = NULL;
6939                 }
6940                 if (client->type == HNAE3_CLIENT_ROCE)
6941                         return;
6942                 if (hdev->nic_client && client->ops->uninit_instance) {
6943                         hclge_uninit_instance_hw(hdev);
6944                         client->ops->uninit_instance(&vport->nic, 0);
6945                         hdev->nic_client = NULL;
6946                         vport->nic.client = NULL;
6947                 }
6948         }
6949 }
6950
6951 static int hclge_pci_init(struct hclge_dev *hdev)
6952 {
6953         struct pci_dev *pdev = hdev->pdev;
6954         struct hclge_hw *hw;
6955         int ret;
6956
6957         ret = pci_enable_device(pdev);
6958         if (ret) {
6959                 dev_err(&pdev->dev, "failed to enable PCI device\n");
6960                 return ret;
6961         }
6962
6963         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6964         if (ret) {
6965                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6966                 if (ret) {
6967                         dev_err(&pdev->dev,
6968                                 "can't set consistent PCI DMA\n");
6969                         goto err_disable_device;
6970                 }
6971                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
6972         }
6973
6974         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
6975         if (ret) {
6976                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
6977                 goto err_disable_device;
6978         }
6979
6980         pci_set_master(pdev);
6981         hw = &hdev->hw;
6982         hw->io_base = pcim_iomap(pdev, 2, 0);
6983         if (!hw->io_base) {
6984                 dev_err(&pdev->dev, "Can't map configuration register space\n");
6985                 ret = -ENOMEM;
6986                 goto err_clr_master;
6987         }
6988
6989         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
6990
6991         return 0;
6992 err_clr_master:
6993         pci_clear_master(pdev);
6994         pci_release_regions(pdev);
6995 err_disable_device:
6996         pci_disable_device(pdev);
6997
6998         return ret;
6999 }
7000
7001 static void hclge_pci_uninit(struct hclge_dev *hdev)
7002 {
7003         struct pci_dev *pdev = hdev->pdev;
7004
7005         pcim_iounmap(pdev, hdev->hw.io_base);
7006         pci_free_irq_vectors(pdev);
7007         pci_clear_master(pdev);
7008         pci_release_mem_regions(pdev);
7009         pci_disable_device(pdev);
7010 }
7011
7012 static void hclge_state_init(struct hclge_dev *hdev)
7013 {
7014         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7015         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7016         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7017         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7018         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7019         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7020 }
7021
7022 static void hclge_state_uninit(struct hclge_dev *hdev)
7023 {
7024         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7025
7026         if (hdev->service_timer.function)
7027                 del_timer_sync(&hdev->service_timer);
7028         if (hdev->reset_timer.function)
7029                 del_timer_sync(&hdev->reset_timer);
7030         if (hdev->service_task.func)
7031                 cancel_work_sync(&hdev->service_task);
7032         if (hdev->rst_service_task.func)
7033                 cancel_work_sync(&hdev->rst_service_task);
7034         if (hdev->mbx_service_task.func)
7035                 cancel_work_sync(&hdev->mbx_service_task);
7036 }
7037
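/* FLR preparation: request a function-level reset, then poll for up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5 seconds for the reset path
 * to bring the function down before the PCI core performs the FLR.
 */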
7038 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7039 {
7040 #define HCLGE_FLR_WAIT_MS       100
7041 #define HCLGE_FLR_WAIT_CNT      50
7042         struct hclge_dev *hdev = ae_dev->priv;
7043         int cnt = 0;
7044
7045         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7046         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7047         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7048         hclge_reset_event(hdev->pdev, NULL);
7049
7050         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7051                cnt++ < HCLGE_FLR_WAIT_CNT)
7052                 msleep(HCLGE_FLR_WAIT_MS);
7053
7054         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7055                 dev_err(&hdev->pdev->dev,
7056                         "flr wait down timeout: %d\n", cnt);
7057 }
7058
7059 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7060 {
7061         struct hclge_dev *hdev = ae_dev->priv;
7062
7063         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7064 }
7065
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclge_dev *hdev;
        int ret;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev) {
                ret = -ENOMEM;
                goto out;
        }

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        hdev->reset_type = HNAE3_NONE_RESET;
        hdev->reset_level = HNAE3_FUNC_RESET;
        ae_dev->priv = hdev;
        hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

        mutex_init(&hdev->vport_lock);

        ret = hclge_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI init failed\n");
                goto out;
        }

        /* Initialize the firmware command queue */
        ret = hclge_cmd_queue_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
                goto err_pci_uninit;
        }

        /* Initialize the firmware command interface */
        ret = hclge_cmd_init(hdev);
        if (ret)
                goto err_cmd_uninit;

        ret = hclge_get_cap(hdev);
        if (ret) {
                dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
                        ret);
                goto err_cmd_uninit;
        }

        ret = hclge_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_misc_irq_init(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Misc IRQ(vector0) init error, ret = %d.\n",
                        ret);
                goto err_msi_uninit;
        }

        ret = hclge_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_alloc_vport(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_map_tqp(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
                ret = hclge_mac_mdio_config(hdev);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "mdio config fail, ret = %d\n", ret);
                        goto err_msi_irq_uninit;
                }
        }

        ret = hclge_init_umv_space(hdev);
        if (ret) {
                dev_err(&pdev->dev, "umv space init error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_config_gro(hdev, true);
        if (ret)
                goto err_mdiobus_unreg;

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_tm_schd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        hclge_rss_init_cfg(hdev);
        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = init_mgr_tbl(hdev);
        if (ret) {
                dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_init_fd_config(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "fd table init fail, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_hw_error_set_state(hdev, true);
        if (ret) {
                dev_err(&pdev->dev,
                        "hw error interrupts enable failed, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        hclge_dcb_ops_set(hdev);

        timer_setup(&hdev->service_timer, hclge_service_timer, 0);
        timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
        INIT_WORK(&hdev->service_task, hclge_service_task);
        INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
        INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

        hclge_clear_all_event_cause(hdev);

        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);

        hclge_state_init(hdev);
        hdev->last_reset_time = jiffies;

        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
        return 0;

err_mdiobus_unreg:
        if (hdev->hw.mac.phydev)
                mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
        hclge_misc_irq_uninit(hdev);
err_msi_uninit:
        pci_free_irq_vectors(pdev);
err_cmd_uninit:
        hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
        pcim_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
out:
        return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
        memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_vport_start(vport);
                vport++;
        }
}

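/* Re-initialization path run after a reset.  Unlike hclge_init_ae_dev()
 * it reuses the already allocated vports, MSI vectors and TQPs, and only
 * reprograms the hardware blocks that the reset wiped out.
 */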
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct pci_dev *pdev = ae_dev->pdev;
        int ret;

        set_bit(HCLGE_STATE_DOWN, &hdev->state);

        hclge_stats_clear(hdev);
        memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

        ret = hclge_cmd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed\n");
                return ret;
        }

        ret = hclge_get_cap(hdev);
        if (ret) {
                dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hclge_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
                return ret;
        }

        ret = hclge_map_tqp(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
                return ret;
        }

        hclge_reset_umv_space(hdev);

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_config_gro(hdev, true);
        if (ret)
                return ret;

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_tm_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_init_fd_config(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "fd table init fail, ret = %d\n", ret);
                return ret;
        }

        /* Re-enable the TM hw error interrupts because
         * they get disabled on core/global reset.
         */
        if (hclge_enable_tm_hw_error(hdev, true))
                dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");

        hclge_reset_vport_state(hdev);

        dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);

        return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_state_uninit(hdev);

        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);

        hclge_uninit_umv_space(hdev);

        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
        synchronize_irq(hdev->misc_vector.vector_irq);

        hclge_hw_error_set_state(hdev, false);
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
        mutex_destroy(&hdev->vport_lock);
        ae_dev->priv = NULL;
}

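/* The largest combined channel count ethtool may request: limited both by
 * the RSS size per TC and by the number of TQPs this function owns.
 */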
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
                               struct ethtool_channels *ch)
{
        struct hclge_vport *vport = hclge_get_vport(handle);

        ch->max_combined = hclge_get_max_channels(handle);
        ch->other_count = 1;
        ch->max_other = 1;
        ch->combined_count = vport->alloc_tqps;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                        u16 *alloc_tqps, u16 *max_rss_size)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        *alloc_tqps = vport->alloc_tqps;
        *max_rss_size = hdev->rss_size_max;
}

static void hclge_release_tqp(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp =
                        container_of(kinfo->tqp[i], struct hclge_tqp, q);

                tqp->q.handle = NULL;
                tqp->q.tqp_index = 0;
                tqp->alloced = false;
        }

        devm_kfree(&hdev->pdev->dev, kinfo->tqp);
        kinfo->tqp = NULL;
}

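/* ethtool -L handler: release the old TQPs, rebuild the NIC setup and TM
 * schedule for the new queue count, then rewrite the RSS TC mode and the
 * indirection table so traffic spreads across the new queue set.
 */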
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int cur_rss_size = kinfo->rss_size;
        int cur_tqps = kinfo->num_tqps;
        u16 tc_offset[HCLGE_MAX_TC_NUM];
        u16 tc_valid[HCLGE_MAX_TC_NUM];
        u16 tc_size[HCLGE_MAX_TC_NUM];
        u16 roundup_size;
        u32 *rss_indir;
        int ret, i;

        /* Free the old TQPs; they are reallocated for the new TQP count
         * when the NIC is set up again below.
         */
        hclge_release_tqp(vport);

        ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
        if (ret) {
                dev_err(&hdev->pdev->dev, "setup nic fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_map_tqp_to_vport(hdev, vport);
        if (ret) {
                dev_err(&hdev->pdev->dev, "map vport tqp fail, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_tm_schd_init(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "tm schd init fail, ret = %d\n", ret);
                return ret;
        }

        roundup_size = roundup_pow_of_two(kinfo->rss_size);
        roundup_size = ilog2(roundup_size);
        /* Set the RSS TC mode according to the new RSS size */
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                tc_valid[i] = 0;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                tc_valid[i] = 1;
                tc_size[i] = roundup_size;
                tc_offset[i] = kinfo->rss_size * i;
        }
        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
        if (ret)
                return ret;

        /* Reinitialize the RSS indirection table for the new RSS size */
        rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
        if (!rss_indir)
                return -ENOMEM;

        for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
                rss_indir[i] = i % kinfo->rss_size;

        ret = hclge_set_rss(handle, rss_indir, NULL, 0);
        if (ret)
                dev_err(&hdev->pdev->dev, "set rss indir table fail, ret = %d\n",
                        ret);

        kfree(rss_indir);

        if (!ret)
                dev_info(&hdev->pdev->dev,
                         "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
                         cur_rss_size, kinfo->rss_size,
                         cur_tqps, kinfo->rss_size * kinfo->num_tc);

        return ret;
}

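/* Query how many 32-bit and 64-bit registers a full dump contains; the
 * two counts come back in the first two data words of the response.
 */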
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
                              u32 *regs_num_64_bit)
{
        struct hclge_desc desc;
        u32 total_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query register number cmd failed, ret = %d.\n", ret);
                return ret;
        }

        *regs_num_32_bit = le32_to_cpu(desc.data[0]);
        *regs_num_64_bit = le32_to_cpu(desc.data[1]);

        total_num = *regs_num_32_bit + *regs_num_64_bit;
        if (!total_num)
                return -EINVAL;

        return 0;
}

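/* Read the 32-bit register dump.  Each descriptor carries
 * HCLGE_32_BIT_REG_RTN_DATANUM 32-bit words in total, but the first one
 * only yields HCLGE_32_BIT_REG_RTN_DATANUM - 2 of them (the remainder is
 * taken by the command header), hence the "+ 2" when sizing the
 * descriptor array; continuation descriptors are consumed whole.
 */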
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

        struct hclge_desc *desc;
        u32 *reg_val = data;
        __le32 *desc_data;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 32 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le32 *)(&desc[i].data[0]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
                } else {
                        desc_data = (__le32 *)(&desc[i]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le32_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

        struct hclge_desc *desc;
        u64 *reg_val = data;
        __le64 *desc_data;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 64 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le64_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 regs_num_32_bit, regs_num_64_bit;
        int ret;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return -EOPNOTSUPP;
        }

        return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

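/* Fill a register dump for ethtool: the 32-bit registers come first, with
 * the 64-bit registers appended directly after them.
 */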
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
                           void *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 regs_num_32_bit, regs_num_64_bit;
        int ret;

        *version = hdev->fw_version;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return;
        }

        ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 32 bit register failed, ret = %d.\n", ret);
                return;
        }

        data = (u32 *)data + regs_num_32_bit;
        ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
                                    data);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
        struct hclge_set_led_state_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

        req = (struct hclge_set_led_state_cmd *)desc.data;
        hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
                        HCLGE_LED_LOCATE_STATE_S, locate_led_status);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Send set led state cmd error, ret = %d\n", ret);

        return ret;
}

enum hclge_led_status {
        HCLGE_LED_OFF,
        HCLGE_LED_ON,
        HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
                            enum ethtool_phys_id_state status)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        switch (status) {
        case ETHTOOL_ID_ACTIVE:
                return hclge_set_led_status(hdev, HCLGE_LED_ON);
        case ETHTOOL_ID_INACTIVE:
                return hclge_set_led_status(hdev, HCLGE_LED_OFF);
        default:
                return -EINVAL;
        }
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
                                unsigned long *supported,
                                unsigned long *advertising)
{
        unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        unsigned int idx = 0;

        for (; idx < size; idx++) {
                supported[idx] = hdev->hw.mac.supported[idx];
                advertising[idx] = hdev->hw.mac.advertising[idx];
        }
}

static int hclge_gro_en(struct hnae3_handle *handle, int enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return hclge_config_gro(hdev, enable);
}

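/* Operations exported to the hnae3 framework; the hns3 client driver
 * reaches all of the PF functionality above through this table.
 */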
static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
        .flr_prepare = hclge_flr_prepare,
        .flr_done = hclge_flr_done,
        .init_client_instance = hclge_init_client_instance,
        .uninit_client_instance = hclge_uninit_client_instance,
        .map_ring_to_vector = hclge_map_ring_to_vector,
        .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
        .get_vector = hclge_get_vector,
        .put_vector = hclge_put_vector,
        .set_promisc_mode = hclge_set_promisc_mode,
        .set_loopback = hclge_set_loopback,
        .start = hclge_ae_start,
        .stop = hclge_ae_stop,
        .client_start = hclge_client_start,
        .client_stop = hclge_client_stop,
        .get_status = hclge_get_status,
        .get_ksettings_an_result = hclge_get_ksettings_an_result,
        .update_speed_duplex_h = hclge_update_speed_duplex_h,
        .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
        .get_media_type = hclge_get_media_type,
        .get_rss_key_size = hclge_get_rss_key_size,
        .get_rss_indir_size = hclge_get_rss_indir_size,
        .get_rss = hclge_get_rss,
        .set_rss = hclge_set_rss,
        .set_rss_tuple = hclge_set_rss_tuple,
        .get_rss_tuple = hclge_get_rss_tuple,
        .get_tc_size = hclge_get_tc_size,
        .get_mac_addr = hclge_get_mac_addr,
        .set_mac_addr = hclge_set_mac_addr,
        .do_ioctl = hclge_do_ioctl,
        .add_uc_addr = hclge_add_uc_addr,
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
        .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
        .update_stats = hclge_update_stats,
        .get_strings = hclge_get_strings,
        .get_sset_count = hclge_get_sset_count,
        .get_fw_version = hclge_get_fw_version,
        .get_mdix_mode = hclge_get_mdix_mode,
        .enable_vlan_filter = hclge_enable_vlan_filter,
        .set_vlan_filter = hclge_set_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
        .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
        .set_default_reset_request = hclge_set_def_reset_request,
        .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
        .set_channels = hclge_set_channels,
        .get_channels = hclge_get_channels,
        .get_regs_len = hclge_get_regs_len,
        .get_regs = hclge_get_regs,
        .set_led_id = hclge_set_led_id,
        .get_link_mode = hclge_get_link_mode,
        .add_fd_entry = hclge_add_fd_entry,
        .del_fd_entry = hclge_del_fd_entry,
        .del_all_fd_entries = hclge_del_all_fd_entries,
        .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
        .get_fd_rule_info = hclge_get_fd_rule_info,
        .get_fd_all_rules = hclge_get_all_rules,
        .restore_fd_rules = hclge_restore_fd_entries,
        .enable_fd = hclge_enable_fd,
        .dbg_run_cmd = hclge_dbg_run_cmd,
        .process_hw_error = hclge_process_ras_hw_error,
        .get_hw_reset_stat = hclge_get_hw_reset_stat,
        .ae_dev_resetting = hclge_ae_dev_resetting,
        .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
        .set_gro_en = hclge_gro_en,
};

static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
        .pdev_id_table = ae_algo_pci_tbl,
};

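/* Module entry points just (un)register this algo with the hnae3
 * framework; device probe and remove are then driven by the framework
 * through init_ae_dev/uninit_ae_dev above.
 */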
static int hclge_init(void)
{
        pr_info("%s is initializing\n", HCLGE_NAME);

        hnae3_register_ae_algo(&ae_algo);

        return 0;
}

static void hclge_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);