net: hns3: add support for multiple media type
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
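/* HCLGE_STATS_READ reads a u64 counter located 'offset' bytes into the
 * statistics structure 'p'; HCLGE_MAC_STATS_FIELD_OFF gives the byte offset
 * of a field within struct hclge_mac_stats.
 */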
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38
39 static struct hnae3_ae_algo ae_algo;
40
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49         /* required last entry */
50         {0, }
51 };
52
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56                                          HCLGE_CMDQ_TX_ADDR_H_REG,
57                                          HCLGE_CMDQ_TX_DEPTH_REG,
58                                          HCLGE_CMDQ_TX_TAIL_REG,
59                                          HCLGE_CMDQ_TX_HEAD_REG,
60                                          HCLGE_CMDQ_RX_ADDR_L_REG,
61                                          HCLGE_CMDQ_RX_ADDR_H_REG,
62                                          HCLGE_CMDQ_RX_DEPTH_REG,
63                                          HCLGE_CMDQ_RX_TAIL_REG,
64                                          HCLGE_CMDQ_RX_HEAD_REG,
65                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
66                                          HCLGE_CMDQ_INTR_STS_REG,
67                                          HCLGE_CMDQ_INTR_EN_REG,
68                                          HCLGE_CMDQ_INTR_GEN_REG};
69
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71                                            HCLGE_VECTOR0_OTER_EN_REG,
72                                            HCLGE_MISC_RESET_STS_REG,
73                                            HCLGE_MISC_VECTOR_INT_STS,
74                                            HCLGE_GLOBAL_RESET_REG,
75                                            HCLGE_FUN_RST_ING,
76                                            HCLGE_GRO_EN_REG};
77
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79                                          HCLGE_RING_RX_ADDR_H_REG,
80                                          HCLGE_RING_RX_BD_NUM_REG,
81                                          HCLGE_RING_RX_BD_LENGTH_REG,
82                                          HCLGE_RING_RX_MERGE_EN_REG,
83                                          HCLGE_RING_RX_TAIL_REG,
84                                          HCLGE_RING_RX_HEAD_REG,
85                                          HCLGE_RING_RX_FBD_NUM_REG,
86                                          HCLGE_RING_RX_OFFSET_REG,
87                                          HCLGE_RING_RX_FBD_OFFSET_REG,
88                                          HCLGE_RING_RX_STASH_REG,
89                                          HCLGE_RING_RX_BD_ERR_REG,
90                                          HCLGE_RING_TX_ADDR_L_REG,
91                                          HCLGE_RING_TX_ADDR_H_REG,
92                                          HCLGE_RING_TX_BD_NUM_REG,
93                                          HCLGE_RING_TX_PRIORITY_REG,
94                                          HCLGE_RING_TX_TC_REG,
95                                          HCLGE_RING_TX_MERGE_EN_REG,
96                                          HCLGE_RING_TX_TAIL_REG,
97                                          HCLGE_RING_TX_HEAD_REG,
98                                          HCLGE_RING_TX_FBD_NUM_REG,
99                                          HCLGE_RING_TX_OFFSET_REG,
100                                          HCLGE_RING_TX_EBD_NUM_REG,
101                                          HCLGE_RING_TX_EBD_OFFSET_REG,
102                                          HCLGE_RING_TX_BD_ERR_REG,
103                                          HCLGE_RING_EN_REG};
104
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106                                              HCLGE_TQP_INTR_GL0_REG,
107                                              HCLGE_TQP_INTR_GL1_REG,
108                                              HCLGE_TQP_INTR_GL2_REG,
109                                              HCLGE_TQP_INTR_RL_REG};
110
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112         "App    Loopback test",
113         "Serdes serial Loopback test",
114         "Serdes parallel Loopback test",
115         "Phy    Loopback test"
116 };
117
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119         {"mac_tx_mac_pause_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121         {"mac_rx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123         {"mac_tx_control_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125         {"mac_rx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127         {"mac_tx_pfc_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129         {"mac_tx_pfc_pri0_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131         {"mac_tx_pfc_pri1_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133         {"mac_tx_pfc_pri2_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135         {"mac_tx_pfc_pri3_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137         {"mac_tx_pfc_pri4_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139         {"mac_tx_pfc_pri5_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141         {"mac_tx_pfc_pri6_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143         {"mac_tx_pfc_pri7_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145         {"mac_rx_pfc_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147         {"mac_rx_pfc_pri0_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149         {"mac_rx_pfc_pri1_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151         {"mac_rx_pfc_pri2_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153         {"mac_rx_pfc_pri3_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155         {"mac_rx_pfc_pri4_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157         {"mac_rx_pfc_pri5_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159         {"mac_rx_pfc_pri6_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161         {"mac_rx_pfc_pri7_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163         {"mac_tx_total_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165         {"mac_tx_total_oct_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167         {"mac_tx_good_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169         {"mac_tx_bad_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171         {"mac_tx_good_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173         {"mac_tx_bad_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175         {"mac_tx_uni_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177         {"mac_tx_multi_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179         {"mac_tx_broad_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181         {"mac_tx_undersize_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183         {"mac_tx_oversize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185         {"mac_tx_64_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187         {"mac_tx_65_127_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189         {"mac_tx_128_255_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191         {"mac_tx_256_511_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193         {"mac_tx_512_1023_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195         {"mac_tx_1024_1518_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197         {"mac_tx_1519_2047_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199         {"mac_tx_2048_4095_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201         {"mac_tx_4096_8191_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203         {"mac_tx_8192_9216_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205         {"mac_tx_9217_12287_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207         {"mac_tx_12288_16383_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209         {"mac_tx_1519_max_good_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211         {"mac_tx_1519_max_bad_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213         {"mac_rx_total_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215         {"mac_rx_total_oct_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217         {"mac_rx_good_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219         {"mac_rx_bad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221         {"mac_rx_good_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223         {"mac_rx_bad_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225         {"mac_rx_uni_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227         {"mac_rx_multi_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229         {"mac_rx_broad_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231         {"mac_rx_undersize_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233         {"mac_rx_oversize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235         {"mac_rx_64_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237         {"mac_rx_65_127_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239         {"mac_rx_128_255_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241         {"mac_rx_256_511_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243         {"mac_rx_512_1023_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245         {"mac_rx_1024_1518_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247         {"mac_rx_1519_2047_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249         {"mac_rx_2048_4095_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251         {"mac_rx_4096_8191_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253         {"mac_rx_8192_9216_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255         {"mac_rx_9217_12287_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257         {"mac_rx_12288_16383_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259         {"mac_rx_1519_max_good_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261         {"mac_rx_1519_max_bad_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263
264         {"mac_tx_fragment_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266         {"mac_tx_undermin_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268         {"mac_tx_jabber_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270         {"mac_tx_err_all_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272         {"mac_tx_from_app_good_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274         {"mac_tx_from_app_bad_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276         {"mac_rx_fragment_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278         {"mac_rx_undermin_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280         {"mac_rx_jabber_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282         {"mac_rx_fcs_err_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284         {"mac_rx_send_app_good_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286         {"mac_rx_send_app_bad_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289
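/* Management MAC table entry for LLDP frames: destination MAC
 * 01:80:c2:00:00:0e with the LLDP EtherType, bound to port 0 through
 * i_port_bitmap.
 */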
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291         {
292                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296                 .i_port_bitmap = 0x1,
297         },
298 };
299
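/* Default RSS hash key: the standard 40-byte Toeplitz key from the
 * Microsoft RSS specification.
 */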
300 static const u8 hclge_hash_key[] = {
301         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307
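/* Old-firmware path: read the MAC statistics with the fixed-length
 * HCLGE_OPC_STATS_MAC command and accumulate the counters into
 * hdev->hw_stats.mac_stats.
 */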
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311
312         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314         __le64 *desc_data;
315         int i, k, n;
316         int ret;
317
318         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320         if (ret) {
321                 dev_err(&hdev->pdev->dev,
322                         "Get MAC pkt stats fail, status = %d.\n", ret);
323
324                 return ret;
325         }
326
327         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328                 /* for special opcode 0032, only the first desc has the head */
329                 if (unlikely(i == 0)) {
330                         desc_data = (__le64 *)(&desc[i].data[0]);
331                         n = HCLGE_RD_FIRST_STATS_NUM;
332                 } else {
333                         desc_data = (__le64 *)(&desc[i]);
334                         n = HCLGE_RD_OTHER_STATS_NUM;
335                 }
336
337                 for (k = 0; k < n; k++) {
338                         *data += le64_to_cpu(*desc_data);
339                         data++;
340                         desc_data++;
341                 }
342         }
343
344         return 0;
345 }
346
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350         struct hclge_desc *desc;
351         __le64 *desc_data;
352         u16 i, k, n;
353         int ret;
354
355         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356         if (!desc)
357                 return -ENOMEM;
358         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360         if (ret) {
361                 kfree(desc);
362                 return ret;
363         }
364
365         for (i = 0; i < desc_num; i++) {
366                 /* for special opcode 0034, only the first desc has the head */
367                 if (i == 0) {
368                         desc_data = (__le64 *)(&desc[i].data[0]);
369                         n = HCLGE_RD_FIRST_STATS_NUM;
370                 } else {
371                         desc_data = (__le64 *)(&desc[i]);
372                         n = HCLGE_RD_OTHER_STATS_NUM;
373                 }
374
375                 for (k = 0; k < n; k++) {
376                         *data += le64_to_cpu(*desc_data);
377                         data++;
378                         desc_data++;
379                 }
380         }
381
382         kfree(desc);
383
384         return 0;
385 }
386
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389         struct hclge_desc desc;
390         __le32 *desc_data;
391         u32 reg_num;
392         int ret;
393
394         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396         if (ret)
397                 return ret;
398
399         desc_data = (__le32 *)(&desc.data[0]);
400         reg_num = le32_to_cpu(*desc_data);
401
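        /* desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4): one descriptor for the
         * first three statistics registers, plus one for every further group
         * of four.
         */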
402         *desc_num = 1 + ((reg_num - 3) >> 2) +
403                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404
405         return 0;
406 }
407
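/* Update the MAC statistics: first ask the firmware how many descriptors the
 * full read needs; firmware that does not support this query returns
 * -EOPNOTSUPP, in which case the fixed-length (defective) command is used.
 */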
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410         u32 desc_num;
411         int ret;
412
413         ret = hclge_mac_query_reg_num(hdev, &desc_num);
414
415         /* The firmware supports the new statistics acquisition method */
416         if (!ret)
417                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418         else if (ret == -EOPNOTSUPP)
419                 ret = hclge_mac_update_stats_defective(hdev);
420         else
421                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422
423         return ret;
424 }
425
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429         struct hclge_vport *vport = hclge_get_vport(handle);
430         struct hclge_dev *hdev = vport->back;
431         struct hnae3_queue *queue;
432         struct hclge_desc desc[1];
433         struct hclge_tqp *tqp;
434         int ret, i;
435
436         for (i = 0; i < kinfo->num_tqps; i++) {
437                 queue = handle->kinfo.tqp[i];
438                 tqp = container_of(queue, struct hclge_tqp, q);
439                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
440                 hclge_cmd_setup_basic_desc(&desc[0],
441                                            HCLGE_OPC_QUERY_RX_STATUS,
442                                            true);
443
444                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
446                 if (ret) {
447                         dev_err(&hdev->pdev->dev,
448                                 "Query tqp stat fail, status = %d, queue = %d\n",
449                                 ret, i);
450                         return ret;
451                 }
452                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453                         le32_to_cpu(desc[0].data[1]);
454         }
455
456         for (i = 0; i < kinfo->num_tqps; i++) {
457                 queue = handle->kinfo.tqp[i];
458                 tqp = container_of(queue, struct hclge_tqp, q);
459                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
460                 hclge_cmd_setup_basic_desc(&desc[0],
461                                            HCLGE_OPC_QUERY_TX_STATUS,
462                                            true);
463
464                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
466                 if (ret) {
467                         dev_err(&hdev->pdev->dev,
468                                 "Query tqp stat fail, status = %d, queue = %d\n",
469                                 ret, i);
470                         return ret;
471                 }
472                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473                         le32_to_cpu(desc[0].data[1]);
474         }
475
476         return 0;
477 }
478
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482         struct hclge_tqp *tqp;
483         u64 *buff = data;
484         int i;
485
486         for (i = 0; i < kinfo->num_tqps; i++) {
487                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489         }
490
491         for (i = 0; i < kinfo->num_tqps; i++) {
492                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494         }
495
496         return buff;
497 }
498
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502
503         return kinfo->num_tqps * 2;     /* one TX and one RX counter per TQP */
504 }
505
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509         u8 *buff = data;
510         int i = 0;
511
512         for (i = 0; i < kinfo->num_tqps; i++) {
513                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514                         struct hclge_tqp, q);
515                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516                          tqp->index);
517                 buff = buff + ETH_GSTRING_LEN;
518         }
519
520         for (i = 0; i < kinfo->num_tqps; i++) {
521                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522                         struct hclge_tqp, q);
523                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524                          tqp->index);
525                 buff = buff + ETH_GSTRING_LEN;
526         }
527
528         return buff;
529 }
530
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532                                  const struct hclge_comm_stats_str strs[],
533                                  int size, u64 *data)
534 {
535         u64 *buf = data;
536         u32 i;
537
538         for (i = 0; i < size; i++)
539                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540
541         return buf + size;
542 }
543
544 static u8 *hclge_comm_get_strings(u32 stringset,
545                                   const struct hclge_comm_stats_str strs[],
546                                   int size, u8 *data)
547 {
548         char *buff = (char *)data;
549         u32 i;
550
551         if (stringset != ETH_SS_STATS)
552                 return buff;
553
554         for (i = 0; i < size; i++) {
555                 snprintf(buff, ETH_GSTRING_LEN, "%s",
556                          strs[i].desc);
557                 buff = buff + ETH_GSTRING_LEN;
558         }
559
560         return (u8 *)buff;
561 }
562
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565         struct hnae3_handle *handle;
566         int status;
567
568         handle = &hdev->vport[0].nic;
569         if (handle->client) {
570                 status = hclge_tqps_update_stats(handle);
571                 if (status) {
572                         dev_err(&hdev->pdev->dev,
573                                 "Update TQPS stats fail, status = %d.\n",
574                                 status);
575                 }
576         }
577
578         status = hclge_mac_update_stats(hdev);
579         if (status)
580                 dev_err(&hdev->pdev->dev,
581                         "Update MAC stats fail, status = %d.\n", status);
582 }
583
584 static void hclge_update_stats(struct hnae3_handle *handle,
585                                struct net_device_stats *net_stats)
586 {
587         struct hclge_vport *vport = hclge_get_vport(handle);
588         struct hclge_dev *hdev = vport->back;
589         int status;
590
591         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592                 return;
593
594         status = hclge_mac_update_stats(hdev);
595         if (status)
596                 dev_err(&hdev->pdev->dev,
597                         "Update MAC stats fail, status = %d.\n",
598                         status);
599
600         status = hclge_tqps_update_stats(handle);
601         if (status)
602                 dev_err(&hdev->pdev->dev,
603                         "Update TQPS stats fail, status = %d.\n",
604                         status);
605
606         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612                 HNAE3_SUPPORT_PHY_LOOPBACK |\
613                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615
616         struct hclge_vport *vport = hclge_get_vport(handle);
617         struct hclge_dev *hdev = vport->back;
618         int count = 0;
619
620         /* Loopback test support rules:
621          * mac: only GE mode is supported
622          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
623          * phy: only supported when a phy device exists on the board
624          */
625         if (stringset == ETH_SS_TEST) {
626                 /* clear loopback bit flags at first */
627                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628                 if (hdev->pdev->revision >= 0x21 ||
629                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632                         count += 1;
633                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634                 }
635
636                 count += 2;
637                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639         } else if (stringset == ETH_SS_STATS) {
640                 count = ARRAY_SIZE(g_mac_stats_string) +
641                         hclge_tqps_get_sset_count(handle, stringset);
642         }
643
644         return count;
645 }
646
647 static void hclge_get_strings(struct hnae3_handle *handle,
648                               u32 stringset,
649                               u8 *data)
650 {
651         u8 *p = data;
652         int size;
653
654         if (stringset == ETH_SS_STATS) {
655                 size = ARRAY_SIZE(g_mac_stats_string);
656                 p = hclge_comm_get_strings(stringset,
657                                            g_mac_stats_string,
658                                            size,
659                                            p);
660                 p = hclge_tqps_get_strings(handle, p);
661         } else if (stringset == ETH_SS_TEST) {
662                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663                         memcpy(p,
664                                hns3_nic_test_strs[HNAE3_LOOP_APP],
665                                ETH_GSTRING_LEN);
666                         p += ETH_GSTRING_LEN;
667                 }
668                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669                         memcpy(p,
670                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671                                ETH_GSTRING_LEN);
672                         p += ETH_GSTRING_LEN;
673                 }
674                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675                         memcpy(p,
676                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677                                ETH_GSTRING_LEN);
678                         p += ETH_GSTRING_LEN;
679                 }
680                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681                         memcpy(p,
682                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686         }
687 }
688
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691         struct hclge_vport *vport = hclge_get_vport(handle);
692         struct hclge_dev *hdev = vport->back;
693         u64 *p;
694
695         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696                                  g_mac_stats_string,
697                                  ARRAY_SIZE(g_mac_stats_string),
698                                  data);
699         p = hclge_tqps_get_stats(handle, p);
700 }
701
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703                                      u64 *rx_cnt)
704 {
705         struct hclge_vport *vport = hclge_get_vport(handle);
706         struct hclge_dev *hdev = vport->back;
707
708         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710 }
711
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713                                    struct hclge_func_status_cmd *status)
714 {
715         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716                 return -EINVAL;
717
718         /* Set the pf to main pf */
719         if (status->pf_state & HCLGE_PF_STATE_MAIN)
720                 hdev->flag |= HCLGE_FLAG_MAIN;
721         else
722                 hdev->flag &= ~HCLGE_FLAG_MAIN;
723
724         return 0;
725 }
726
727 static int hclge_query_function_status(struct hclge_dev *hdev)
728 {
729         struct hclge_func_status_cmd *req;
730         struct hclge_desc desc;
731         int timeout = 0;
732         int ret;
733
734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735         req = (struct hclge_func_status_cmd *)desc.data;
736
737         do {
738                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739                 if (ret) {
740                         dev_err(&hdev->pdev->dev,
741                                 "query function status failed %d.\n",
742                                 ret);
743
744                         return ret;
745                 }
746
747                 /* Check if pf reset is done */
748                 if (req->pf_state)
749                         break;
750                 usleep_range(1000, 2000);
751         } while (timeout++ < 5);
752
753         ret = hclge_parse_func_status(hdev, req);
754
755         return ret;
756 }
757
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
759 {
760         struct hclge_pf_res_cmd *req;
761         struct hclge_desc desc;
762         int ret;
763
764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766         if (ret) {
767                 dev_err(&hdev->pdev->dev,
768                         "query pf resource failed %d.\n", ret);
769                 return ret;
770         }
771
772         req = (struct hclge_pf_res_cmd *)desc.data;
773         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775
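        /* Buffer sizes are reported by firmware in units of
         * (1 << HCLGE_BUF_UNIT_S) bytes; fall back to the defaults when a size
         * is not reported and round the result up to HCLGE_BUF_SIZE_UNIT.
         */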
776         if (req->tx_buf_size)
777                 hdev->tx_buf_size =
778                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779         else
780                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781
782         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783
784         if (req->dv_buf_size)
785                 hdev->dv_buf_size =
786                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787         else
788                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789
790         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791
792         if (hnae3_dev_roce_supported(hdev)) {
793                 hdev->roce_base_msix_offset =
794                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796                 hdev->num_roce_msi =
797                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799
800                 /* PF should have both NIC vectors and RoCE vectors;
801                  * NIC vectors are queued before RoCE vectors.
802                  */
803                 hdev->num_msi = hdev->num_roce_msi +
804                                 hdev->roce_base_msix_offset;
805         } else {
806                 hdev->num_msi =
807                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809         }
810
811         return 0;
812 }
813
814 static int hclge_parse_speed(int speed_cmd, int *speed)
815 {
816         switch (speed_cmd) {
817         case 6:
818                 *speed = HCLGE_MAC_SPEED_10M;
819                 break;
820         case 7:
821                 *speed = HCLGE_MAC_SPEED_100M;
822                 break;
823         case 0:
824                 *speed = HCLGE_MAC_SPEED_1G;
825                 break;
826         case 1:
827                 *speed = HCLGE_MAC_SPEED_10G;
828                 break;
829         case 2:
830                 *speed = HCLGE_MAC_SPEED_25G;
831                 break;
832         case 3:
833                 *speed = HCLGE_MAC_SPEED_40G;
834                 break;
835         case 4:
836                 *speed = HCLGE_MAC_SPEED_50G;
837                 break;
838         case 5:
839                 *speed = HCLGE_MAC_SPEED_100G;
840                 break;
841         default:
842                 return -EINVAL;
843         }
844
845         return 0;
846 }
847
848 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
849 {
850         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
851                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
852                                  mac->supported);
853         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
854                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
855                                  mac->supported);
856         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
857                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
858                                  mac->supported);
859         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
860                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
861                                  mac->supported);
862         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
863                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
864                                  mac->supported);
865 }
866
867 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
868 {
869         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
870                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
871                                  mac->supported);
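        /* Note: ethtool defines no 25G LR link mode bit, so the SR bit
         * appears to be reused for 25G below.
         */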
872         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
873                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
874                                  mac->supported);
875         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
876                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
877                                  mac->supported);
878         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
879                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
880                                  mac->supported);
881         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
882                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
883                                  mac->supported);
884 }
885
886 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
887 {
888         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
889                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
890                                  mac->supported);
891         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
892                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
893                                  mac->supported);
894         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
902                                  mac->supported);
903 }
904
905 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
906 {
907         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
908                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
909                                  mac->supported);
910         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
911                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
912                                  mac->supported);
913         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
924                                  mac->supported);
925 }
926
927 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
928                                         u8 speed_ability)
929 {
930         struct hclge_mac *mac = &hdev->hw.mac;
931
932         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
934                                  mac->supported);
935
936         hclge_convert_setting_sr(mac, speed_ability);
937         hclge_convert_setting_lr(mac, speed_ability);
938         hclge_convert_setting_cr(mac, speed_ability);
939
940         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
941         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
942 }
943
944 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
945                                             u8 speed_ability)
946 {
947         struct hclge_mac *mac = &hdev->hw.mac;
948
949         hclge_convert_setting_kr(mac, speed_ability);
950         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
951         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
952 }
953
954 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
955                                          u8 speed_ability)
956 {
957         unsigned long *supported = hdev->hw.mac.supported;
958
959         /* default to support all speeds for a GE port */
960         if (!speed_ability)
961                 speed_ability = HCLGE_SUPPORT_GE;
962
963         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
965                                  supported);
966
967         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
968                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
969                                  supported);
970                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
971                                  supported);
972         }
973
974         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
975                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
976                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
977         }
978
979         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
980         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
981         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
982 }
983
984 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
985 {
986         u8 media_type = hdev->hw.mac.media_type;
987
988         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
989                 hclge_parse_fiber_link_mode(hdev, speed_ability);
990         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
991                 hclge_parse_copper_link_mode(hdev, speed_ability);
992         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
993                 hclge_parse_backplane_link_mode(hdev, speed_ability);
994 }

995 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
996 {
997         struct hclge_cfg_param_cmd *req;
998         u64 mac_addr_tmp_high;
999         u64 mac_addr_tmp;
1000         int i;
1001
1002         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1003
1004         /* get the configuration */
1005         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1006                                               HCLGE_CFG_VMDQ_M,
1007                                               HCLGE_CFG_VMDQ_S);
1008         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1009                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1010         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1011                                             HCLGE_CFG_TQP_DESC_N_M,
1012                                             HCLGE_CFG_TQP_DESC_N_S);
1013
1014         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1015                                         HCLGE_CFG_PHY_ADDR_M,
1016                                         HCLGE_CFG_PHY_ADDR_S);
1017         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1018                                           HCLGE_CFG_MEDIA_TP_M,
1019                                           HCLGE_CFG_MEDIA_TP_S);
1020         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1021                                           HCLGE_CFG_RX_BUF_LEN_M,
1022                                           HCLGE_CFG_RX_BUF_LEN_S);
1023         /* get mac_address */
1024         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1025         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1026                                             HCLGE_CFG_MAC_ADDR_H_M,
1027                                             HCLGE_CFG_MAC_ADDR_H_S);
1028
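        /* param[2] holds the low 32 bits of the MAC address and param[3] the
         * high 16 bits; the shift is written as (<< 31) << 1, presumably to
         * avoid a single 32-bit shift.
         */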
1029         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1030
1031         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1032                                              HCLGE_CFG_DEFAULT_SPEED_M,
1033                                              HCLGE_CFG_DEFAULT_SPEED_S);
1034         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1035                                             HCLGE_CFG_RSS_SIZE_M,
1036                                             HCLGE_CFG_RSS_SIZE_S);
1037
1038         for (i = 0; i < ETH_ALEN; i++)
1039                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1040
1041         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1042         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1043
1044         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1045                                              HCLGE_CFG_SPEED_ABILITY_M,
1046                                              HCLGE_CFG_SPEED_ABILITY_S);
1047         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1048                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1049                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1050         if (!cfg->umv_space)
1051                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1052 }
1053
1054 /* hclge_get_cfg: query the static parameters from flash
1055  * @hdev: pointer to struct hclge_dev
1056  * @hcfg: the config structure to be filled in
1057  */
1058 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1059 {
1060         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1061         struct hclge_cfg_param_cmd *req;
1062         int i, ret;
1063
1064         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1065                 u32 offset = 0;
1066
1067                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1068                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1069                                            true);
1070                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1071                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1072                 /* The read length must be in units of 4 bytes when sent to hardware */
1073                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1074                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1075                 req->offset = cpu_to_le32(offset);
1076         }
1077
1078         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1079         if (ret) {
1080                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1081                 return ret;
1082         }
1083
1084         hclge_parse_cfg(hcfg, desc);
1085
1086         return 0;
1087 }
1088
1089 static int hclge_get_cap(struct hclge_dev *hdev)
1090 {
1091         int ret;
1092
1093         ret = hclge_query_function_status(hdev);
1094         if (ret) {
1095                 dev_err(&hdev->pdev->dev,
1096                         "query function status error %d.\n", ret);
1097                 return ret;
1098         }
1099
1100         /* get pf resource */
1101         ret = hclge_query_pf_resource(hdev);
1102         if (ret)
1103                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1104
1105         return ret;
1106 }
1107
1108 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1109 {
1110 #define HCLGE_MIN_TX_DESC       64
1111 #define HCLGE_MIN_RX_DESC       64
1112
1113         if (!is_kdump_kernel())
1114                 return;
1115
1116         dev_info(&hdev->pdev->dev,
1117                  "Running kdump kernel. Using minimal resources\n");
1118
1119         /* the minimum number of queue pairs equals the number of vports */
1120         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1121         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1122         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1123 }
1124
1125 static int hclge_configure(struct hclge_dev *hdev)
1126 {
1127         struct hclge_cfg cfg;
1128         int ret, i;
1129
1130         ret = hclge_get_cfg(hdev, &cfg);
1131         if (ret) {
1132                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1133                 return ret;
1134         }
1135
1136         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1137         hdev->base_tqp_pid = 0;
1138         hdev->rss_size_max = cfg.rss_size_max;
1139         hdev->rx_buf_len = cfg.rx_buf_len;
1140         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1141         hdev->hw.mac.media_type = cfg.media_type;
1142         hdev->hw.mac.phy_addr = cfg.phy_addr;
1143         hdev->num_tx_desc = cfg.tqp_desc_num;
1144         hdev->num_rx_desc = cfg.tqp_desc_num;
1145         hdev->tm_info.num_pg = 1;
1146         hdev->tc_max = cfg.tc_num;
1147         hdev->tm_info.hw_pfc_map = 0;
1148         hdev->wanted_umv_size = cfg.umv_space;
1149
1150         if (hnae3_dev_fd_supported(hdev))
1151                 hdev->fd_en = true;
1152
1153         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1154         if (ret) {
1155                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1156                 return ret;
1157         }
1158
1159         hclge_parse_link_mode(hdev, cfg.speed_ability);
1160
1161         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1162             (hdev->tc_max < 1)) {
1163                 dev_warn(&hdev->pdev->dev, "invalid TC num = %d, set to 1.\n",
1164                          hdev->tc_max);
1165                 hdev->tc_max = 1;
1166         }
1167
1168         /* Dev does not support DCB */
1169         if (!hnae3_dev_dcb_supported(hdev)) {
1170                 hdev->tc_max = 1;
1171                 hdev->pfc_max = 0;
1172         } else {
1173                 hdev->pfc_max = hdev->tc_max;
1174         }
1175
1176         hdev->tm_info.num_tc = 1;
1177
1178         /* Non-contiguous TCs are currently not supported */
1179         for (i = 0; i < hdev->tm_info.num_tc; i++)
1180                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1181
1182         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1183
1184         hclge_init_kdump_kernel_config(hdev);
1185
1186         return ret;
1187 }
1188
1189 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1190                             int tso_mss_max)
1191 {
1192         struct hclge_cfg_tso_status_cmd *req;
1193         struct hclge_desc desc;
1194         u16 tso_mss;
1195
1196         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1197
1198         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1199
1200         tso_mss = 0;
1201         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1202                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1203         req->tso_mss_min = cpu_to_le16(tso_mss);
1204
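        /* Note: the MSS_MIN mask and shift are reused to pack the maximum MSS
         * below, presumably because both fields share the same bit layout.
         */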
1205         tso_mss = 0;
1206         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1207                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1208         req->tso_mss_max = cpu_to_le16(tso_mss);
1209
1210         return hclge_cmd_send(&hdev->hw, &desc, 1);
1211 }
1212
1213 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1214 {
1215         struct hclge_cfg_gro_status_cmd *req;
1216         struct hclge_desc desc;
1217         int ret;
1218
1219         if (!hnae3_dev_gro_supported(hdev))
1220                 return 0;
1221
1222         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1223         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1224
1225         req->gro_en = cpu_to_le16(en ? 1 : 0);
1226
1227         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1228         if (ret)
1229                 dev_err(&hdev->pdev->dev,
1230                         "GRO hardware config cmd failed, ret = %d\n", ret);
1231
1232         return ret;
1233 }
1234
1235 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1236 {
1237         struct hclge_tqp *tqp;
1238         int i;
1239
1240         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1241                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1242         if (!hdev->htqp)
1243                 return -ENOMEM;
1244
1245         tqp = hdev->htqp;
1246
1247         for (i = 0; i < hdev->num_tqps; i++) {
1248                 tqp->dev = &hdev->pdev->dev;
1249                 tqp->index = i;
1250
1251                 tqp->q.ae_algo = &ae_algo;
1252                 tqp->q.buf_size = hdev->rx_buf_len;
1253                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1254                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1255                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1256                         i * HCLGE_TQP_REG_SIZE;
1257
1258                 tqp++;
1259         }
1260
1261         return 0;
1262 }
1263
1264 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1265                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1266 {
1267         struct hclge_tqp_map_cmd *req;
1268         struct hclge_desc desc;
1269         int ret;
1270
1271         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1272
1273         req = (struct hclge_tqp_map_cmd *)desc.data;
1274         req->tqp_id = cpu_to_le16(tqp_pid);
1275         req->tqp_vf = func_id;
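        /* MAP_TYPE bit is 1 when mapping a VF queue and 0 for the PF;
         * MAP_EN enables the mapping.
         */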
1276         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1277                         1 << HCLGE_TQP_MAP_EN_B;
1278         req->tqp_vid = cpu_to_le16(tqp_vid);
1279
1280         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1281         if (ret)
1282                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1283
1284         return ret;
1285 }
1286
1287 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1288 {
1289         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1290         struct hclge_dev *hdev = vport->back;
1291         int i, alloced;
1292
1293         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1294              alloced < num_tqps; i++) {
1295                 if (!hdev->htqp[i].alloced) {
1296                         hdev->htqp[i].q.handle = &vport->nic;
1297                         hdev->htqp[i].q.tqp_index = alloced;
1298                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1299                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1300                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1301                         hdev->htqp[i].alloced = true;
1302                         alloced++;
1303                 }
1304         }
1305         vport->alloc_tqps = alloced;
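        /* RSS size per TC: spread the allocated queues evenly across the TCs,
         * capped at the hardware maximum.
         */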
1306         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1307                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1308
1309         return 0;
1310 }
1311
1312 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1313                             u16 num_tx_desc, u16 num_rx_desc)
1314
1315 {
1316         struct hnae3_handle *nic = &vport->nic;
1317         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1318         struct hclge_dev *hdev = vport->back;
1319         int ret;
1320
1321         kinfo->num_tx_desc = num_tx_desc;
1322         kinfo->num_rx_desc = num_rx_desc;
1323
1324         kinfo->rx_buf_len = hdev->rx_buf_len;
1325
1326         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1327                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1328         if (!kinfo->tqp)
1329                 return -ENOMEM;
1330
1331         ret = hclge_assign_tqp(vport, num_tqps);
1332         if (ret)
1333                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1334
1335         return ret;
1336 }
1337
1338 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1339                                   struct hclge_vport *vport)
1340 {
1341         struct hnae3_handle *nic = &vport->nic;
1342         struct hnae3_knic_private_info *kinfo;
1343         u16 i;
1344
1345         kinfo = &nic->kinfo;
1346         for (i = 0; i < vport->alloc_tqps; i++) {
1347                 struct hclge_tqp *q =
1348                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1349                 bool is_pf;
1350                 int ret;
1351
1352                 is_pf = !(vport->vport_id);
1353                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1354                                              i, is_pf);
1355                 if (ret)
1356                         return ret;
1357         }
1358
1359         return 0;
1360 }
1361
1362 static int hclge_map_tqp(struct hclge_dev *hdev)
1363 {
1364         struct hclge_vport *vport = hdev->vport;
1365         u16 i, num_vport;
1366
1367         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1368         for (i = 0; i < num_vport; i++) {
1369                 int ret;
1370
1371                 ret = hclge_map_tqp_to_vport(hdev, vport);
1372                 if (ret)
1373                         return ret;
1374
1375                 vport++;
1376         }
1377
1378         return 0;
1379 }
1380
1381 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1382 {
1383         /* this would be initialized later */
1384 }
1385
1386 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1387 {
1388         struct hnae3_handle *nic = &vport->nic;
1389         struct hclge_dev *hdev = vport->back;
1390         int ret;
1391
1392         nic->pdev = hdev->pdev;
1393         nic->ae_algo = &ae_algo;
1394         nic->numa_node_mask = hdev->numa_node_mask;
1395
1396         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1397                 ret = hclge_knic_setup(vport, num_tqps,
1398                                        hdev->num_tx_desc, hdev->num_rx_desc);
1399
1400                 if (ret) {
1401                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1402                                 ret);
1403                         return ret;
1404                 }
1405         } else {
1406                 hclge_unic_setup(vport, num_tqps);
1407         }
1408
1409         return 0;
1410 }
1411
1412 static int hclge_alloc_vport(struct hclge_dev *hdev)
1413 {
1414         struct pci_dev *pdev = hdev->pdev;
1415         struct hclge_vport *vport;
1416         u32 tqp_main_vport;
1417         u32 tqp_per_vport;
1418         int num_vport, i;
1419         int ret;
1420
1421         /* We need to alloc a vport for the main NIC of the PF */
1422         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1423
1424         if (hdev->num_tqps < num_vport) {
1425                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1426                         hdev->num_tqps, num_vport);
1427                 return -EINVAL;
1428         }
1429
1430         /* Alloc the same number of TQPs for every vport */
1431         tqp_per_vport = hdev->num_tqps / num_vport;
1432         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1433
1434         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1435                              GFP_KERNEL);
1436         if (!vport)
1437                 return -ENOMEM;
1438
1439         hdev->vport = vport;
1440         hdev->num_alloc_vport = num_vport;
1441
1442         if (IS_ENABLED(CONFIG_PCI_IOV))
1443                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1444
1445         for (i = 0; i < num_vport; i++) {
1446                 vport->back = hdev;
1447                 vport->vport_id = i;
1448                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1449                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1450                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1451                 INIT_LIST_HEAD(&vport->vlan_list);
1452                 INIT_LIST_HEAD(&vport->uc_mac_list);
1453                 INIT_LIST_HEAD(&vport->mc_mac_list);
1454
1455                 if (i == 0)
1456                         ret = hclge_vport_setup(vport, tqp_main_vport);
1457                 else
1458                         ret = hclge_vport_setup(vport, tqp_per_vport);
1459                 if (ret) {
1460                         dev_err(&pdev->dev,
1461                                 "vport setup failed for vport %d, %d\n",
1462                                 i, ret);
1463                         return ret;
1464                 }
1465
1466                 vport++;
1467         }
1468
1469         return 0;
1470 }
1471
1472 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1473                                     struct hclge_pkt_buf_alloc *buf_alloc)
1474 {
1475 /* TX buffer size is allocated in units of 128 bytes */
1476 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1477 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1478         struct hclge_tx_buff_alloc_cmd *req;
1479         struct hclge_desc desc;
1480         int ret;
1481         u8 i;
1482
1483         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1484
1485         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1486         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1487                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1488
1489                 req->tx_pkt_buff[i] =
1490                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1491                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1492         }
1493
1494         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1495         if (ret)
1496                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1497                         ret);
1498
1499         return ret;
1500 }
1501
1502 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1503                                  struct hclge_pkt_buf_alloc *buf_alloc)
1504 {
1505         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1506
1507         if (ret)
1508                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1509
1510         return ret;
1511 }
1512
1513 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1514 {
1515         int i, cnt = 0;
1516
1517         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1518                 if (hdev->hw_tc_map & BIT(i))
1519                         cnt++;
1520         return cnt;
1521 }
1522
1523 /* Get the number of pfc enabled TCs, which have private buffer */
1524 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1525                                   struct hclge_pkt_buf_alloc *buf_alloc)
1526 {
1527         struct hclge_priv_buf *priv;
1528         int i, cnt = 0;
1529
1530         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1531                 priv = &buf_alloc->priv_buf[i];
1532                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1533                     priv->enable)
1534                         cnt++;
1535         }
1536
1537         return cnt;
1538 }
1539
1540 /* Get the number of pfc disabled TCs, which have private buffer */
1541 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1542                                      struct hclge_pkt_buf_alloc *buf_alloc)
1543 {
1544         struct hclge_priv_buf *priv;
1545         int i, cnt = 0;
1546
1547         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1548                 priv = &buf_alloc->priv_buf[i];
1549                 if (hdev->hw_tc_map & BIT(i) &&
1550                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1551                     priv->enable)
1552                         cnt++;
1553         }
1554
1555         return cnt;
1556 }
1557
1558 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1559 {
1560         struct hclge_priv_buf *priv;
1561         u32 rx_priv = 0;
1562         int i;
1563
1564         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1565                 priv = &buf_alloc->priv_buf[i];
1566                 if (priv->enable)
1567                         rx_priv += priv->buf_size;
1568         }
1569         return rx_priv;
1570 }
1571
1572 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1573 {
1574         u32 i, total_tx_size = 0;
1575
1576         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1577                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1578
1579         return total_tx_size;
1580 }
1581
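/* Check whether the rx buffer left after private allocation can hold the
 * required shared buffer, then split the shared buffer into high/low
 * waterlines and per-TC thresholds, using different waterline schemes for
 * DCB and non-DCB capable devices.
 */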
1582 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1583                                 struct hclge_pkt_buf_alloc *buf_alloc,
1584                                 u32 rx_all)
1585 {
1586         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1587         u32 tc_num = hclge_get_tc_num(hdev);
1588         u32 shared_buf, aligned_mps;
1589         u32 rx_priv;
1590         int i;
1591
1592         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1593
1594         if (hnae3_dev_dcb_supported(hdev))
1595                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1596         else
1597                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1598                                         + hdev->dv_buf_size;
1599
1600         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1601         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1602                              HCLGE_BUF_SIZE_UNIT);
1603
1604         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1605         if (rx_all < rx_priv + shared_std)
1606                 return false;
1607
1608         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1609         buf_alloc->s_buf.buf_size = shared_buf;
1610         if (hnae3_dev_dcb_supported(hdev)) {
1611                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1612                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1613                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1614         } else {
1615                 buf_alloc->s_buf.self.high = aligned_mps +
1616                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1617                 buf_alloc->s_buf.self.low = aligned_mps;
1618         }
1619
1620         if (hnae3_dev_dcb_supported(hdev)) {
1621                 if (tc_num)
1622                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1623                 else
1624                         hi_thrd = shared_buf - hdev->dv_buf_size;
1625
1626                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1627                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1628                 lo_thrd = hi_thrd - aligned_mps / 2;
1629         } else {
1630                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1631                 lo_thrd = aligned_mps;
1632         }
1633
1634         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1635                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1636                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1637         }
1638
1639         return true;
1640 }
1641
1642 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1643                                 struct hclge_pkt_buf_alloc *buf_alloc)
1644 {
1645         u32 i, total_size;
1646
1647         total_size = hdev->pkt_buf_size;
1648
1649         /* alloc tx buffer for all enabled TCs */
1650         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1651                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1652
1653                 if (hdev->hw_tc_map & BIT(i)) {
1654                         if (total_size < hdev->tx_buf_size)
1655                                 return -ENOMEM;
1656
1657                         priv->tx_buf_size = hdev->tx_buf_size;
1658                 } else {
1659                         priv->tx_buf_size = 0;
1660                 }
1661
1662                 total_size -= priv->tx_buf_size;
1663         }
1664
1665         return 0;
1666 }
1667
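/* Compute each enabled TC's private rx buffer and waterlines, using the
 * larger ('max') or smaller waterline scheme, then verify the remaining
 * packet buffer can still hold the shared buffer.
 */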
1668 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1669                                   struct hclge_pkt_buf_alloc *buf_alloc)
1670 {
1671         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1672         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1673         int i;
1674
1675         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1676                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1677
1678                 priv->enable = 0;
1679                 priv->wl.low = 0;
1680                 priv->wl.high = 0;
1681                 priv->buf_size = 0;
1682
1683                 if (!(hdev->hw_tc_map & BIT(i)))
1684                         continue;
1685
1686                 priv->enable = 1;
1687
1688                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1689                         priv->wl.low = max ? aligned_mps : 256;
1690                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1691                                                 HCLGE_BUF_SIZE_UNIT);
1692                 } else {
1693                         priv->wl.low = 0;
1694                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1695                 }
1696
1697                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1698         }
1699
1700         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1701 }
1702
1703 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1704                                           struct hclge_pkt_buf_alloc *buf_alloc)
1705 {
1706         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1707         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1708         int i;
1709
1710         /* let the last one be cleared first */
1711         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1712                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1713
1714                 if (hdev->hw_tc_map & BIT(i) &&
1715                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1716                         /* Clear the no pfc TC private buffer */
1717                         priv->wl.low = 0;
1718                         priv->wl.high = 0;
1719                         priv->buf_size = 0;
1720                         priv->enable = 0;
1721                         no_pfc_priv_num--;
1722                 }
1723
1724                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1725                     no_pfc_priv_num == 0)
1726                         break;
1727         }
1728
1729         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1730 }
1731
1732 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1733                                         struct hclge_pkt_buf_alloc *buf_alloc)
1734 {
1735         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1736         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1737         int i;
1738
1739         /* let the last one be cleared first */
1740         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1741                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1742
1743                 if (hdev->hw_tc_map & BIT(i) &&
1744                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1745                         /* Reduce the number of pfc TCs with private buffer */
1746                         priv->wl.low = 0;
1747                         priv->enable = 0;
1748                         priv->wl.high = 0;
1749                         priv->buf_size = 0;
1750                         pfc_priv_num--;
1751                 }
1752
1753                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1754                     pfc_priv_num == 0)
1755                         break;
1756         }
1757
1758         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1759 }
1760
1761 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1762  * @hdev: pointer to struct hclge_dev
1763  * @buf_alloc: pointer to buffer calculation data
1764  * @return: 0: calculation successful, negative: fail
1765  */
1766 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1767                                 struct hclge_pkt_buf_alloc *buf_alloc)
1768 {
1769         /* When DCB is not supported, rx private buffer is not allocated. */
1770         if (!hnae3_dev_dcb_supported(hdev)) {
1771                 u32 rx_all = hdev->pkt_buf_size;
1772
1773                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1774                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1775                         return -ENOMEM;
1776
1777                 return 0;
1778         }
1779
1780         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1781                 return 0;
1782
1783         /* try to decrease the buffer size */
1784         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1785                 return 0;
1786
1787         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1788                 return 0;
1789
1790         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1791                 return 0;
1792
1793         return -ENOMEM;
1794 }
1795
1796 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1797                                    struct hclge_pkt_buf_alloc *buf_alloc)
1798 {
1799         struct hclge_rx_priv_buff_cmd *req;
1800         struct hclge_desc desc;
1801         int ret;
1802         int i;
1803
1804         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1805         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1806
1807         /* Alloc private buffer TCs */
1808         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1809                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1810
1811                 req->buf_num[i] =
1812                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1813                 req->buf_num[i] |=
1814                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1815         }
1816
1817         req->shared_buf =
1818                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1819                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1820
1821         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1822         if (ret)
1823                 dev_err(&hdev->pdev->dev,
1824                         "rx private buffer alloc cmd failed %d\n", ret);
1825
1826         return ret;
1827 }
1828
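/* Program the per-TC private buffer high/low waterlines into hardware;
 * two descriptors are chained to cover all TCs.
 */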
1829 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1830                                    struct hclge_pkt_buf_alloc *buf_alloc)
1831 {
1832         struct hclge_rx_priv_wl_buf *req;
1833         struct hclge_priv_buf *priv;
1834         struct hclge_desc desc[2];
1835         int i, j;
1836         int ret;
1837
1838         for (i = 0; i < 2; i++) {
1839                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1840                                            false);
1841                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1842
1843                 /* The first descriptor sets the NEXT bit to 1 */
1844                 if (i == 0)
1845                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1846                 else
1847                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1848
1849                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1850                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1851
1852                         priv = &buf_alloc->priv_buf[idx];
1853                         req->tc_wl[j].high =
1854                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1855                         req->tc_wl[j].high |=
1856                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1857                         req->tc_wl[j].low =
1858                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1859                         req->tc_wl[j].low |=
1860                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1861                 }
1862         }
1863
1864         /* Send 2 descriptors at one time */
1865         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1866         if (ret)
1867                 dev_err(&hdev->pdev->dev,
1868                         "rx private waterline config cmd failed %d\n",
1869                         ret);
1870         return ret;
1871 }
1872
1873 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1874                                     struct hclge_pkt_buf_alloc *buf_alloc)
1875 {
1876         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1877         struct hclge_rx_com_thrd *req;
1878         struct hclge_desc desc[2];
1879         struct hclge_tc_thrd *tc;
1880         int i, j;
1881         int ret;
1882
1883         for (i = 0; i < 2; i++) {
1884                 hclge_cmd_setup_basic_desc(&desc[i],
1885                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1886                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1887
1888                 /* The first descriptor sets the NEXT bit to 1 */
1889                 if (i == 0)
1890                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1891                 else
1892                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1893
1894                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1895                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1896
1897                         req->com_thrd[j].high =
1898                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1899                         req->com_thrd[j].high |=
1900                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1901                         req->com_thrd[j].low =
1902                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1903                         req->com_thrd[j].low |=
1904                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1905                 }
1906         }
1907
1908         /* Send 2 descriptors at one time */
1909         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1910         if (ret)
1911                 dev_err(&hdev->pdev->dev,
1912                         "common threshold config cmd failed %d\n", ret);
1913         return ret;
1914 }
1915
1916 static int hclge_common_wl_config(struct hclge_dev *hdev,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1920         struct hclge_rx_com_wl *req;
1921         struct hclge_desc desc;
1922         int ret;
1923
1924         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1925
1926         req = (struct hclge_rx_com_wl *)desc.data;
1927         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1928         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1929
1930         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1931         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1932
1933         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1934         if (ret)
1935                 dev_err(&hdev->pdev->dev,
1936                         "common waterline config cmd failed %d\n", ret);
1937
1938         return ret;
1939 }
1940
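/* Partition the packet buffer: calculate and program the tx buffers and the
 * per-TC rx private buffers, then, for DCB capable devices, the private
 * waterlines and common thresholds, and finally the common waterline.
 */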
1941 int hclge_buffer_alloc(struct hclge_dev *hdev)
1942 {
1943         struct hclge_pkt_buf_alloc *pkt_buf;
1944         int ret;
1945
1946         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1947         if (!pkt_buf)
1948                 return -ENOMEM;
1949
1950         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1951         if (ret) {
1952                 dev_err(&hdev->pdev->dev,
1953                         "could not calc tx buffer size for all TCs %d\n", ret);
1954                 goto out;
1955         }
1956
1957         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1958         if (ret) {
1959                 dev_err(&hdev->pdev->dev,
1960                         "could not alloc tx buffers %d\n", ret);
1961                 goto out;
1962         }
1963
1964         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1965         if (ret) {
1966                 dev_err(&hdev->pdev->dev,
1967                         "could not calc rx priv buffer size for all TCs %d\n",
1968                         ret);
1969                 goto out;
1970         }
1971
1972         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1973         if (ret) {
1974                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1975                         ret);
1976                 goto out;
1977         }
1978
1979         if (hnae3_dev_dcb_supported(hdev)) {
1980                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1981                 if (ret) {
1982                         dev_err(&hdev->pdev->dev,
1983                                 "could not configure rx private waterline %d\n",
1984                                 ret);
1985                         goto out;
1986                 }
1987
1988                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1989                 if (ret) {
1990                         dev_err(&hdev->pdev->dev,
1991                                 "could not configure common threshold %d\n",
1992                                 ret);
1993                         goto out;
1994                 }
1995         }
1996
1997         ret = hclge_common_wl_config(hdev, pkt_buf);
1998         if (ret)
1999                 dev_err(&hdev->pdev->dev,
2000                         "could not configure common waterline %d\n", ret);
2001
2002 out:
2003         kfree(pkt_buf);
2004         return ret;
2005 }
2006
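/* Populate the RoCE handle with vector, netdev and io_base information
 * shared with the nic handle; fail if no MSI-X vectors are left for RoCE.
 */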
2007 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2008 {
2009         struct hnae3_handle *roce = &vport->roce;
2010         struct hnae3_handle *nic = &vport->nic;
2011
2012         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2013
2014         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2015             vport->back->num_msi_left == 0)
2016                 return -EINVAL;
2017
2018         roce->rinfo.base_vector = vport->back->roce_base_vector;
2019
2020         roce->rinfo.netdev = nic->kinfo.netdev;
2021         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2022
2023         roce->pdev = nic->pdev;
2024         roce->ae_algo = nic->ae_algo;
2025         roce->numa_node_mask = nic->numa_node_mask;
2026
2027         return 0;
2028 }
2029
2030 static int hclge_init_msi(struct hclge_dev *hdev)
2031 {
2032         struct pci_dev *pdev = hdev->pdev;
2033         int vectors;
2034         int i;
2035
2036         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2037                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2038         if (vectors < 0) {
2039                 dev_err(&pdev->dev,
2040                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2041                         vectors);
2042                 return vectors;
2043         }
2044         if (vectors < hdev->num_msi)
2045                 dev_warn(&hdev->pdev->dev,
2046                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2047                          hdev->num_msi, vectors);
2048
2049         hdev->num_msi = vectors;
2050         hdev->num_msi_left = vectors;
2051         hdev->base_msi_vector = pdev->irq;
2052         hdev->roce_base_vector = hdev->base_msi_vector +
2053                                 hdev->roce_base_msix_offset;
2054
2055         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2056                                            sizeof(u16), GFP_KERNEL);
2057         if (!hdev->vector_status) {
2058                 pci_free_irq_vectors(pdev);
2059                 return -ENOMEM;
2060         }
2061
2062         for (i = 0; i < hdev->num_msi; i++)
2063                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2064
2065         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2066                                         sizeof(int), GFP_KERNEL);
2067         if (!hdev->vector_irq) {
2068                 pci_free_irq_vectors(pdev);
2069                 return -ENOMEM;
2070         }
2071
2072         return 0;
2073 }
2074
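/* Only 10M and 100M may run at half duplex; force full duplex otherwise. */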
2075 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2076 {
2077
2078         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2079                 duplex = HCLGE_MAC_FULL;
2080
2081         return duplex;
2082 }
2083
2084 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2085                                       u8 duplex)
2086 {
2087         struct hclge_config_mac_speed_dup_cmd *req;
2088         struct hclge_desc desc;
2089         int ret;
2090
2091         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2092
2093         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2094
2095         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2096
2097         switch (speed) {
2098         case HCLGE_MAC_SPEED_10M:
2099                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2100                                 HCLGE_CFG_SPEED_S, 6);
2101                 break;
2102         case HCLGE_MAC_SPEED_100M:
2103                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2104                                 HCLGE_CFG_SPEED_S, 7);
2105                 break;
2106         case HCLGE_MAC_SPEED_1G:
2107                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2108                                 HCLGE_CFG_SPEED_S, 0);
2109                 break;
2110         case HCLGE_MAC_SPEED_10G:
2111                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2112                                 HCLGE_CFG_SPEED_S, 1);
2113                 break;
2114         case HCLGE_MAC_SPEED_25G:
2115                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2116                                 HCLGE_CFG_SPEED_S, 2);
2117                 break;
2118         case HCLGE_MAC_SPEED_40G:
2119                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2120                                 HCLGE_CFG_SPEED_S, 3);
2121                 break;
2122         case HCLGE_MAC_SPEED_50G:
2123                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2124                                 HCLGE_CFG_SPEED_S, 4);
2125                 break;
2126         case HCLGE_MAC_SPEED_100G:
2127                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2128                                 HCLGE_CFG_SPEED_S, 5);
2129                 break;
2130         default:
2131                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2132                 return -EINVAL;
2133         }
2134
2135         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2136                       1);
2137
2138         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2139         if (ret) {
2140                 dev_err(&hdev->pdev->dev,
2141                         "mac speed/duplex config cmd failed %d.\n", ret);
2142                 return ret;
2143         }
2144
2145         return 0;
2146 }
2147
2148 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2149 {
2150         int ret;
2151
2152         duplex = hclge_check_speed_dup(duplex, speed);
2153         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2154                 return 0;
2155
2156         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2157         if (ret)
2158                 return ret;
2159
2160         hdev->hw.mac.speed = speed;
2161         hdev->hw.mac.duplex = duplex;
2162
2163         return 0;
2164 }
2165
2166 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2167                                      u8 duplex)
2168 {
2169         struct hclge_vport *vport = hclge_get_vport(handle);
2170         struct hclge_dev *hdev = vport->back;
2171
2172         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2173 }
2174
2175 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2176 {
2177         struct hclge_config_auto_neg_cmd *req;
2178         struct hclge_desc desc;
2179         u32 flag = 0;
2180         int ret;
2181
2182         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2183
2184         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2185         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2186         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2187
2188         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2189         if (ret)
2190                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2191                         ret);
2192
2193         return ret;
2194 }
2195
2196 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2197 {
2198         struct hclge_vport *vport = hclge_get_vport(handle);
2199         struct hclge_dev *hdev = vport->back;
2200
2201         return hclge_set_autoneg_en(hdev, enable);
2202 }
2203
2204 static int hclge_get_autoneg(struct hnae3_handle *handle)
2205 {
2206         struct hclge_vport *vport = hclge_get_vport(handle);
2207         struct hclge_dev *hdev = vport->back;
2208         struct phy_device *phydev = hdev->hw.mac.phydev;
2209
2210         if (phydev)
2211                 return phydev->autoneg;
2212
2213         return hdev->hw.mac.autoneg;
2214 }
2215
2216 static int hclge_mac_init(struct hclge_dev *hdev)
2217 {
2218         struct hclge_mac *mac = &hdev->hw.mac;
2219         int ret;
2220
2221         hdev->support_sfp_query = true;
2222         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2223         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2224                                          hdev->hw.mac.duplex);
2225         if (ret) {
2226                 dev_err(&hdev->pdev->dev,
2227                         "Config mac speed dup fail ret=%d\n", ret);
2228                 return ret;
2229         }
2230
2231         mac->link = 0;
2232
2233         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2234         if (ret) {
2235                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2236                 return ret;
2237         }
2238
2239         ret = hclge_buffer_alloc(hdev);
2240         if (ret)
2241                 dev_err(&hdev->pdev->dev,
2242                         "allocate buffer fail, ret=%d\n", ret);
2243
2244         return ret;
2245 }
2246
2247 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2248 {
2249         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2250             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2251                 schedule_work(&hdev->mbx_service_task);
2252 }
2253
2254 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2255 {
2256         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2257                 schedule_work(&hdev->rst_service_task);
2258 }
2259
2260 static void hclge_task_schedule(struct hclge_dev *hdev)
2261 {
2262         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2263             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2264             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2265                 (void)schedule_work(&hdev->service_task);
2266 }
2267
2268 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2269 {
2270         struct hclge_link_status_cmd *req;
2271         struct hclge_desc desc;
2272         int link_status;
2273         int ret;
2274
2275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2276         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2277         if (ret) {
2278                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2279                         ret);
2280                 return ret;
2281         }
2282
2283         req = (struct hclge_link_status_cmd *)desc.data;
2284         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2285
2286         return !!link_status;
2287 }
2288
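/* Combine the MAC link status with the PHY state (when a PHY is present)
 * to decide the effective link state; always report down while the device
 * is in the DOWN state.
 */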
2289 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2290 {
2291         int mac_state;
2292         int link_stat;
2293
2294         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2295                 return 0;
2296
2297         mac_state = hclge_get_mac_link_status(hdev);
2298
2299         if (hdev->hw.mac.phydev) {
2300                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2301                         link_stat = mac_state &
2302                                 hdev->hw.mac.phydev->link;
2303                 else
2304                         link_stat = 0;
2305
2306         } else {
2307                 link_stat = mac_state;
2308         }
2309
2310         return !!link_stat;
2311 }
2312
2313 static void hclge_update_link_status(struct hclge_dev *hdev)
2314 {
2315         struct hnae3_client *rclient = hdev->roce_client;
2316         struct hnae3_client *client = hdev->nic_client;
2317         struct hnae3_handle *rhandle;
2318         struct hnae3_handle *handle;
2319         int state;
2320         int i;
2321
2322         if (!client)
2323                 return;
2324         state = hclge_get_mac_phy_link(hdev);
2325         if (state != hdev->hw.mac.link) {
2326                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2327                         handle = &hdev->vport[i].nic;
2328                         client->ops->link_status_change(handle, state);
2329                         hclge_config_mac_tnl_int(hdev, state);
2330                         rhandle = &hdev->vport[i].roce;
2331                         if (rclient && rclient->ops->link_status_change)
2332                                 rclient->ops->link_status_change(rhandle,
2333                                                                  state);
2334                 }
2335                 hdev->hw.mac.link = state;
2336         }
2337 }
2338
2339 static void hclge_update_port_capability(struct hclge_mac *mac)
2340 {
2341         /* firmware cannot identify the back plane type, the media type
2342          * read from the configuration can help deal with it
2343          */
2344         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2345             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2346                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2347         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2348                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2349
2350         if (mac->support_autoneg) {
2351                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2352                 linkmode_copy(mac->advertising, mac->supported);
2353         } else {
2354                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2355                                    mac->supported);
2356                 linkmode_zero(mac->advertising);
2357         }
2358 }
2359
2360 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2361 {
2362         struct hclge_sfp_info_cmd *resp = NULL;
2363         struct hclge_desc desc;
2364         int ret;
2365
2366         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2367         resp = (struct hclge_sfp_info_cmd *)desc.data;
2368         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2369         if (ret == -EOPNOTSUPP) {
2370                 dev_warn(&hdev->pdev->dev,
2371                          "IMP do not support get SFP speed %d\n", ret);
2372                 return ret;
2373         } else if (ret) {
2374                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2375                 return ret;
2376         }
2377
2378         *speed = le32_to_cpu(resp->speed);
2379
2380         return 0;
2381 }
2382
2383 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2384 {
2385         struct hclge_sfp_info_cmd *resp;
2386         struct hclge_desc desc;
2387         int ret;
2388
2389         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2390         resp = (struct hclge_sfp_info_cmd *)desc.data;
2391
2392         resp->query_type = QUERY_ACTIVE_SPEED;
2393
2394         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2395         if (ret == -EOPNOTSUPP) {
2396                 dev_warn(&hdev->pdev->dev,
2397                          "IMP does not support get SFP info %d\n", ret);
2398                 return ret;
2399         } else if (ret) {
2400                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2401                 return ret;
2402         }
2403
2404         mac->speed = le32_to_cpu(resp->speed);
2405         /* if resp->speed_ability is 0, it means it is an old version of
2406          * firmware, so do not update these params
2407          */
2408         if (resp->speed_ability) {
2409                 mac->module_type = le32_to_cpu(resp->module_type);
2410                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2411                 mac->autoneg = resp->autoneg;
2412                 mac->support_autoneg = resp->autoneg_ability;
2413         } else {
2414                 mac->speed_type = QUERY_SFP_SPEED;
2415         }
2416
2417         return 0;
2418 }
2419
2420 static int hclge_update_port_info(struct hclge_dev *hdev)
2421 {
2422         struct hclge_mac *mac = &hdev->hw.mac;
2423         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2424         int ret;
2425
2426         /* get the port info from SFP cmd if not copper port */
2427         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2428                 return 0;
2429
2430         /* if IMP does not support getting SFP/qSFP info, return directly */
2431         if (!hdev->support_sfp_query)
2432                 return 0;
2433
2434         if (hdev->pdev->revision >= 0x21)
2435                 ret = hclge_get_sfp_info(hdev, mac);
2436         else
2437                 ret = hclge_get_sfp_speed(hdev, &speed);
2438
2439         if (ret == -EOPNOTSUPP) {
2440                 hdev->support_sfp_query = false;
2441                 return ret;
2442         } else if (ret) {
2443                 return ret;
2444         }
2445
2446         if (hdev->pdev->revision >= 0x21) {
2447                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2448                         hclge_update_port_capability(mac);
2449                         return 0;
2450                 }
2451                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2452                                                HCLGE_MAC_FULL);
2453         } else {
2454                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2455                         return 0; /* do nothing if no SFP */
2456
2457                 /* must config full duplex for SFP */
2458                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2459         }
2460 }
2461
2462 static int hclge_get_status(struct hnae3_handle *handle)
2463 {
2464         struct hclge_vport *vport = hclge_get_vport(handle);
2465         struct hclge_dev *hdev = vport->back;
2466
2467         hclge_update_link_status(hdev);
2468
2469         return hdev->hw.mac.link;
2470 }
2471
2472 static void hclge_service_timer(struct timer_list *t)
2473 {
2474         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2475
2476         mod_timer(&hdev->service_timer, jiffies + HZ);
2477         hdev->hw_stats.stats_timer++;
2478         hclge_task_schedule(hdev);
2479 }
2480
2481 static void hclge_service_complete(struct hclge_dev *hdev)
2482 {
2483         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2484
2485         /* Flush memory before next watchdog */
2486         smp_mb__before_atomic();
2487         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2488 }
2489
2490 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2491 {
2492         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2493
2494         /* fetch the events from their corresponding regs */
2495         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2496         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2497         msix_src_reg = hclge_read_dev(&hdev->hw,
2498                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2499
2500         /* Assumption: If by any chance reset and mailbox events are reported
2501          * together, then we will only process the reset event in this pass and
2502          * defer the processing of the mailbox events. Since we would not have
2503          * cleared the RX CMDQ event this time, we would receive another
2504          * interrupt from H/W just for the mailbox.
2505          */
2506
2507         /* check for vector0 reset event sources */
2508         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2509                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2510                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2511                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2512                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2513                 hdev->rst_stats.imp_rst_cnt++;
2514                 return HCLGE_VECTOR0_EVENT_RST;
2515         }
2516
2517         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2518                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2519                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2520                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2521                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2522                 hdev->rst_stats.global_rst_cnt++;
2523                 return HCLGE_VECTOR0_EVENT_RST;
2524         }
2525
2526         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2527                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2528                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2529                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2530                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2531                 hdev->rst_stats.core_rst_cnt++;
2532                 return HCLGE_VECTOR0_EVENT_RST;
2533         }
2534
2535         /* check for vector0 msix event source */
2536         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2537                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2538                         msix_src_reg);
2539                 return HCLGE_VECTOR0_EVENT_ERR;
2540         }
2541
2542         /* check for vector0 mailbox(=CMDQ RX) event source */
2543         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2544                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2545                 *clearval = cmdq_src_reg;
2546                 return HCLGE_VECTOR0_EVENT_MBX;
2547         }
2548
2549         /* print other vector0 event source */
2550         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2551                 cmdq_src_reg, msix_src_reg);
2552         return HCLGE_VECTOR0_EVENT_OTHER;
2553 }
2554
2555 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2556                                     u32 regclr)
2557 {
2558         switch (event_type) {
2559         case HCLGE_VECTOR0_EVENT_RST:
2560                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2561                 break;
2562         case HCLGE_VECTOR0_EVENT_MBX:
2563                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2564                 break;
2565         default:
2566                 break;
2567         }
2568 }
2569
2570 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2571 {
2572         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2573                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2574                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2575                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2576         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2577 }
2578
2579 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2580 {
2581         writel(enable ? 1 : 0, vector->addr);
2582 }
2583
2584 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2585 {
2586         struct hclge_dev *hdev = data;
2587         u32 event_cause;
2588         u32 clearval;
2589
2590         hclge_enable_vector(&hdev->misc_vector, false);
2591         event_cause = hclge_check_event_cause(hdev, &clearval);
2592
2593         /* vector 0 interrupt is shared with reset and mailbox source events. */
2594         switch (event_cause) {
2595         case HCLGE_VECTOR0_EVENT_ERR:
2596                 /* we do not know what type of reset is required now. This could
2597                  * only be decided after we fetch the type of errors which
2598                  * caused this event. Therefore, we will do the following for now:
2599                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2600                  *    type of reset to be used is deferred.
2601                  * 2. Schedule the reset service task.
2602                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
2603                  *    will fetch the correct type of reset. This would be done
2604                  *    by first decoding the types of errors.
2605                  */
2606                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2607                 /* fall through */
2608         case HCLGE_VECTOR0_EVENT_RST:
2609                 hclge_reset_task_schedule(hdev);
2610                 break;
2611         case HCLGE_VECTOR0_EVENT_MBX:
2612                 /* If we are here then,
2613                  * 1. Either we are not handling any mbx task and we are not
2614                  *    scheduled as well
2615                  *                        OR
2616                  * 2. We could be handling a mbx task but nothing more is
2617                  *    scheduled.
2618                  * In both cases, we should schedule the mbx task as there are more
2619                  * mbx messages reported by this interrupt.
2620                  */
2621                 hclge_mbx_task_schedule(hdev);
2622                 break;
2623         default:
2624                 dev_warn(&hdev->pdev->dev,
2625                          "received unknown or unhandled event of vector0\n");
2626                 break;
2627         }
2628
2629         /* clear the source of the interrupt if it is not caused by reset */
2630         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2631                 hclge_clear_event_cause(hdev, event_cause, clearval);
2632                 hclge_enable_vector(&hdev->misc_vector, true);
2633         }
2634
2635         return IRQ_HANDLED;
2636 }
2637
2638 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2639 {
2640         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2641                 dev_warn(&hdev->pdev->dev,
2642                          "vector(vector_id %d) has been freed.\n", vector_id);
2643                 return;
2644         }
2645
2646         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2647         hdev->num_msi_left += 1;
2648         hdev->num_msi_used -= 1;
2649 }
2650
2651 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2652 {
2653         struct hclge_misc_vector *vector = &hdev->misc_vector;
2654
2655         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2656
2657         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2658         hdev->vector_status[0] = 0;
2659
2660         hdev->num_msi_left -= 1;
2661         hdev->num_msi_used += 1;
2662 }
2663
2664 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2665 {
2666         int ret;
2667
2668         hclge_get_misc_vector(hdev);
2669
2670         /* this would be explicitly freed in the end */
2671         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2672                           0, "hclge_misc", hdev);
2673         if (ret) {
2674                 hclge_free_vector(hdev, 0);
2675                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2676                         hdev->misc_vector.vector_irq);
2677         }
2678
2679         return ret;
2680 }
2681
2682 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2683 {
2684         free_irq(hdev->misc_vector.vector_irq, hdev);
2685         hclge_free_vector(hdev, 0);
2686 }
2687
2688 int hclge_notify_client(struct hclge_dev *hdev,
2689                         enum hnae3_reset_notify_type type)
2690 {
2691         struct hnae3_client *client = hdev->nic_client;
2692         u16 i;
2693
2694         if (!client->ops->reset_notify)
2695                 return -EOPNOTSUPP;
2696
2697         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2698                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2699                 int ret;
2700
2701                 ret = client->ops->reset_notify(handle, type);
2702                 if (ret) {
2703                         dev_err(&hdev->pdev->dev,
2704                                 "notify nic client failed %d(%d)\n", type, ret);
2705                         return ret;
2706                 }
2707         }
2708
2709         return 0;
2710 }
2711
2712 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2713                                     enum hnae3_reset_notify_type type)
2714 {
2715         struct hnae3_client *client = hdev->roce_client;
2716         int ret = 0;
2717         u16 i;
2718
2719         if (!client)
2720                 return 0;
2721
2722         if (!client->ops->reset_notify)
2723                 return -EOPNOTSUPP;
2724
2725         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2726                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2727
2728                 ret = client->ops->reset_notify(handle, type);
2729                 if (ret) {
2730                         dev_err(&hdev->pdev->dev,
2731                                 "notify roce client failed %d(%d)",
2732                                 type, ret);
2733                         return ret;
2734                 }
2735         }
2736
2737         return ret;
2738 }
2739
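/* Poll the reset status register (or the FLR done flag for FLR) until the
 * pending reset completes or the wait times out.
 */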
2740 static int hclge_reset_wait(struct hclge_dev *hdev)
2741 {
2742 #define HCLGE_RESET_WATI_MS     100
2743 #define HCLGE_RESET_WAIT_CNT    200
2744         u32 val, reg, reg_bit;
2745         u32 cnt = 0;
2746
2747         switch (hdev->reset_type) {
2748         case HNAE3_IMP_RESET:
2749                 reg = HCLGE_GLOBAL_RESET_REG;
2750                 reg_bit = HCLGE_IMP_RESET_BIT;
2751                 break;
2752         case HNAE3_GLOBAL_RESET:
2753                 reg = HCLGE_GLOBAL_RESET_REG;
2754                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2755                 break;
2756         case HNAE3_CORE_RESET:
2757                 reg = HCLGE_GLOBAL_RESET_REG;
2758                 reg_bit = HCLGE_CORE_RESET_BIT;
2759                 break;
2760         case HNAE3_FUNC_RESET:
2761                 reg = HCLGE_FUN_RST_ING;
2762                 reg_bit = HCLGE_FUN_RST_ING_B;
2763                 break;
2764         case HNAE3_FLR_RESET:
2765                 break;
2766         default:
2767                 dev_err(&hdev->pdev->dev,
2768                         "Wait for unsupported reset type: %d\n",
2769                         hdev->reset_type);
2770                 return -EINVAL;
2771         }
2772
2773         if (hdev->reset_type == HNAE3_FLR_RESET) {
2774                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2775                        cnt++ < HCLGE_RESET_WAIT_CNT)
2776                         msleep(HCLGE_RESET_WATI_MS);
2777
2778                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2779                         dev_err(&hdev->pdev->dev,
2780                                 "flr wait timeout: %d\n", cnt);
2781                         return -EBUSY;
2782                 }
2783
2784                 return 0;
2785         }
2786
2787         val = hclge_read_dev(&hdev->hw, reg);
2788         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2789                 msleep(HCLGE_RESET_WATI_MS);
2790                 val = hclge_read_dev(&hdev->hw, reg);
2791                 cnt++;
2792         }
2793
2794         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2795                 dev_warn(&hdev->pdev->dev,
2796                          "Wait for reset timeout: %d\n", hdev->reset_type);
2797                 return -EBUSY;
2798         }
2799
2800         return 0;
2801 }
2802
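/* Tell the firmware to set or clear the reset status (FUNC_RST_ING) of the
 * given VF, so the VF driver can detect that a function reset is in progress.
 */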
2803 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2804 {
2805         struct hclge_vf_rst_cmd *req;
2806         struct hclge_desc desc;
2807
2808         req = (struct hclge_vf_rst_cmd *)desc.data;
2809         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2810         req->dest_vfid = func_id;
2811
2812         if (reset)
2813                 req->vf_rst = 0x1;
2814
2815         return hclge_cmd_send(&hdev->hw, &desc, 1);
2816 }
2817
2818 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2819 {
2820         int i;
2821
2822         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2823                 struct hclge_vport *vport = &hdev->vport[i];
2824                 int ret;
2825
2826                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2827                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2828                 if (ret) {
2829                         dev_err(&hdev->pdev->dev,
2830                                 "set vf(%d) rst failed %d!\n",
2831                                 vport->vport_id, ret);
2832                         return ret;
2833                 }
2834
2835                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2836                         continue;
2837
2838                 /* Inform VF to process the reset.
2839                  * hclge_inform_reset_assert_to_vf may fail if VF
2840                  * driver is not loaded.
2841                  */
2842                 ret = hclge_inform_reset_assert_to_vf(vport);
2843                 if (ret)
2844                         dev_warn(&hdev->pdev->dev,
2845                                  "inform reset to vf(%d) failed %d!\n",
2846                                  vport->vport_id, ret);
2847         }
2848
2849         return 0;
2850 }
2851
2852 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2853 {
2854         struct hclge_desc desc;
2855         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2856         int ret;
2857
2858         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2859         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2860         req->fun_reset_vfid = func_id;
2861
2862         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2863         if (ret)
2864                 dev_err(&hdev->pdev->dev,
2865                         "send function reset cmd fail, status =%d\n", ret);
2866
2867         return ret;
2868 }
2869
2870 static void hclge_do_reset(struct hclge_dev *hdev)
2871 {
2872         struct hnae3_handle *handle = &hdev->vport[0].nic;
2873         struct pci_dev *pdev = hdev->pdev;
2874         u32 val;
2875
2876         if (hclge_get_hw_reset_stat(handle)) {
2877                 dev_info(&pdev->dev, "Hardware reset not finished\n");
2878                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2879                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2880                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2881                 return;
2882         }
2883
2884         switch (hdev->reset_type) {
2885         case HNAE3_GLOBAL_RESET:
2886                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2887                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2888                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2889                 dev_info(&pdev->dev, "Global Reset requested\n");
2890                 break;
2891         case HNAE3_CORE_RESET:
2892                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2893                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2894                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2895                 dev_info(&pdev->dev, "Core Reset requested\n");
2896                 break;
2897         case HNAE3_FUNC_RESET:
2898                 dev_info(&pdev->dev, "PF Reset requested\n");
2899                 /* schedule again to check later */
2900                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2901                 hclge_reset_task_schedule(hdev);
2902                 break;
2903         case HNAE3_FLR_RESET:
2904                 dev_info(&pdev->dev, "FLR requested\n");
2905                 /* schedule again to check later */
2906                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2907                 hclge_reset_task_schedule(hdev);
2908                 break;
2909         default:
2910                 dev_warn(&pdev->dev,
2911                          "Unsupported reset type: %d\n", hdev->reset_type);
2912                 break;
2913         }
2914 }
2915
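/* Resolve any unknown reset first, then return the highest-priority reset
 * level pending in *addr, clearing it together with any lower-priority
 * requests it supersedes. Returns HNAE3_NONE_RESET if the pending level is
 * lower than the reset currently being handled.
 */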
2916 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2917                                                    unsigned long *addr)
2918 {
2919         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2920
2921         /* first, resolve any unknown reset type to the known type(s) */
2922         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2923                 /* we will intentionally ignore any errors from this function
2924                  *  as we will end up in *some* reset request in any case
2925                  */
2926                 hclge_handle_hw_msix_error(hdev, addr);
2927                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2928                 /* We deferred the clearing of the error event which caused
2929                  * the interrupt, since it was not possible to do that in
2930                  * interrupt context (this is the reason we introduced the
2931                  * new UNKNOWN reset type). Now that the errors have been
2932                  * handled and cleared in hardware, we can safely enable
2933                  * interrupts. This is an exception to the norm.
2934                  */
2935                 hclge_enable_vector(&hdev->misc_vector, true);
2936         }
2937
2938         /* return the highest priority reset level amongst all */
2939         if (test_bit(HNAE3_IMP_RESET, addr)) {
2940                 rst_level = HNAE3_IMP_RESET;
2941                 clear_bit(HNAE3_IMP_RESET, addr);
2942                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2943                 clear_bit(HNAE3_CORE_RESET, addr);
2944                 clear_bit(HNAE3_FUNC_RESET, addr);
2945         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2946                 rst_level = HNAE3_GLOBAL_RESET;
2947                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2948                 clear_bit(HNAE3_CORE_RESET, addr);
2949                 clear_bit(HNAE3_FUNC_RESET, addr);
2950         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2951                 rst_level = HNAE3_CORE_RESET;
2952                 clear_bit(HNAE3_CORE_RESET, addr);
2953                 clear_bit(HNAE3_FUNC_RESET, addr);
2954         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2955                 rst_level = HNAE3_FUNC_RESET;
2956                 clear_bit(HNAE3_FUNC_RESET, addr);
2957         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2958                 rst_level = HNAE3_FLR_RESET;
2959                 clear_bit(HNAE3_FLR_RESET, addr);
2960         }
2961
2962         if (hdev->reset_type != HNAE3_NONE_RESET &&
2963             rst_level < hdev->reset_type)
2964                 return HNAE3_NONE_RESET;
2965
2966         return rst_level;
2967 }
2968
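/* Clear the interrupt status of the reset that was just handled (only IMP,
 * global and core resets have a status bit here) and re-enable the misc
 * vector so that further reset interrupts can be serviced.
 */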
2969 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2970 {
2971         u32 clearval = 0;
2972
2973         switch (hdev->reset_type) {
2974         case HNAE3_IMP_RESET:
2975                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2976                 break;
2977         case HNAE3_GLOBAL_RESET:
2978                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2979                 break;
2980         case HNAE3_CORE_RESET:
2981                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2982                 break;
2983         default:
2984                 break;
2985         }
2986
2987         if (!clearval)
2988                 return;
2989
2990         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2991         hclge_enable_vector(&hdev->misc_vector, true);
2992 }
2993
2994 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2995 {
2996         int ret = 0;
2997
2998         switch (hdev->reset_type) {
2999         case HNAE3_FUNC_RESET:
3000                 /* fall through */
3001         case HNAE3_FLR_RESET:
3002                 ret = hclge_set_all_vf_rst(hdev, true);
3003                 break;
3004         default:
3005                 break;
3006         }
3007
3008         return ret;
3009 }
3010
3011 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3012 {
3013         u32 reg_val;
3014         int ret = 0;
3015
3016         switch (hdev->reset_type) {
3017         case HNAE3_FUNC_RESET:
3018                 /* There is no mechanism for the PF to know if the VF has
3019                  * stopped IO, so for now just wait 100 ms for the VF to stop IO.
3020                  */
3021                 msleep(100);
3022                 ret = hclge_func_reset_cmd(hdev, 0);
3023                 if (ret) {
3024                         dev_err(&hdev->pdev->dev,
3025                                 "asserting function reset fail %d!\n", ret);
3026                         return ret;
3027                 }
3028
3029                 /* After performing PF reset, it is not necessary to do the
3030                  * mailbox handling or send any command to firmware, because
3031                  * any mailbox handling or command to firmware is only valid
3032                  * after hclge_cmd_init is called.
3033                  */
3034                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3035                 hdev->rst_stats.pf_rst_cnt++;
3036                 break;
3037         case HNAE3_FLR_RESET:
3038                 /* There is no mechanism for the PF to know if the VF has
3039                  * stopped IO, so for now just wait 100 ms for the VF to stop IO.
3040                  */
3041                 msleep(100);
3042                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3043                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3044                 hdev->rst_stats.flr_rst_cnt++;
3045                 break;
3046         case HNAE3_IMP_RESET:
3047                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3048                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3049                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3050                 break;
3051         default:
3052                 break;
3053         }
3054
3055         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3056
3057         return ret;
3058 }
3059
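/* Handle a failed reset attempt. Returns true if the reset task should be
 * re-scheduled (another reset is already pending, or we timed out waiting
 * for hardware), false if the failure is dealt with here (an IMP reset is
 * coming, the level-upgrade timer was armed, or the retry limit was hit).
 */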
3060 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3061 {
3062 #define MAX_RESET_FAIL_CNT 5
3063 #define RESET_UPGRADE_DELAY_SEC 10
3064
3065         if (hdev->reset_pending) {
3066                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3067                          hdev->reset_pending);
3068                 return true;
3069         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3070                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3071                     BIT(HCLGE_IMP_RESET_BIT))) {
3072                 dev_info(&hdev->pdev->dev,
3073                          "reset failed because IMP Reset is pending\n");
3074                 hclge_clear_reset_cause(hdev);
3075                 return false;
3076         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3077                 hdev->reset_fail_cnt++;
3078                 if (is_timeout) {
3079                         set_bit(hdev->reset_type, &hdev->reset_pending);
3080                         dev_info(&hdev->pdev->dev,
3081                                  "re-schedule to wait for hw reset done\n");
3082                         return true;
3083                 }
3084
3085                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3086                 hclge_clear_reset_cause(hdev);
3087                 mod_timer(&hdev->reset_timer,
3088                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3089
3090                 return false;
3091         }
3092
3093         hclge_clear_reset_cause(hdev);
3094         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3095         return false;
3096 }
3097
3098 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3099 {
3100         int ret = 0;
3101
3102         switch (hdev->reset_type) {
3103         case HNAE3_FUNC_RESET:
3104                 /* fall through */
3105         case HNAE3_FLR_RESET:
3106                 ret = hclge_set_all_vf_rst(hdev, false);
3107                 break;
3108         default:
3109                 break;
3110         }
3111
3112         return ret;
3113 }
3114
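/* Main reset sequence: bring the clients down, assert the reset, wait for
 * the hardware to finish, re-initialize the ae device and then bring the
 * clients back up. On any failure hclge_reset_err_handle() decides whether
 * the reset task should be re-scheduled.
 */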
3115 static void hclge_reset(struct hclge_dev *hdev)
3116 {
3117         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3118         bool is_timeout = false;
3119         int ret;
3120
3121         /* Initialize ae_dev reset status as well, in case enet layer wants to
3122          * know if device is undergoing reset
3123          */
3124         ae_dev->reset_type = hdev->reset_type;
3125         hdev->rst_stats.reset_cnt++;
3126         /* perform reset of the stack & ae device for a client */
3127         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3128         if (ret)
3129                 goto err_reset;
3130
3131         ret = hclge_reset_prepare_down(hdev);
3132         if (ret)
3133                 goto err_reset;
3134
3135         rtnl_lock();
3136         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3137         if (ret)
3138                 goto err_reset_lock;
3139
3140         rtnl_unlock();
3141
3142         ret = hclge_reset_prepare_wait(hdev);
3143         if (ret)
3144                 goto err_reset;
3145
3146         if (hclge_reset_wait(hdev)) {
3147                 is_timeout = true;
3148                 goto err_reset;
3149         }
3150
3151         hdev->rst_stats.hw_reset_done_cnt++;
3152
3153         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3154         if (ret)
3155                 goto err_reset;
3156
3157         rtnl_lock();
3158         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3159         if (ret)
3160                 goto err_reset_lock;
3161
3162         ret = hclge_reset_ae_dev(hdev->ae_dev);
3163         if (ret)
3164                 goto err_reset_lock;
3165
3166         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3167         if (ret)
3168                 goto err_reset_lock;
3169
3170         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3171         if (ret)
3172                 goto err_reset_lock;
3173
3174         hclge_clear_reset_cause(hdev);
3175
3176         ret = hclge_reset_prepare_up(hdev);
3177         if (ret)
3178                 goto err_reset_lock;
3179
3180         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3181         if (ret)
3182                 goto err_reset_lock;
3183
3184         rtnl_unlock();
3185
3186         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3187         if (ret)
3188                 goto err_reset;
3189
3190         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3191         if (ret)
3192                 goto err_reset;
3193
3194         hdev->last_reset_time = jiffies;
3195         hdev->reset_fail_cnt = 0;
3196         hdev->rst_stats.reset_done_cnt++;
3197         ae_dev->reset_type = HNAE3_NONE_RESET;
3198         del_timer(&hdev->reset_timer);
3199
3200         return;
3201
3202 err_reset_lock:
3203         rtnl_unlock();
3204 err_reset:
3205         if (hclge_reset_err_handle(hdev, is_timeout))
3206                 hclge_reset_task_schedule(hdev);
3207 }
3208
3209 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3210 {
3211         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3212         struct hclge_dev *hdev = ae_dev->priv;
3213
3214         /* We might end up getting called broadly because of the two cases
3215          * below:
3216          * 1. A recoverable error was conveyed through APEI and the only way
3217          *    to bring back normalcy is to reset.
3218          * 2. A new reset request from the stack due to timeout
3219          *
3220          * For the first case, the error event might not have an ae handle
3221          * available. Check whether this is a new reset request and we are
3222          * not here just because the last reset attempt did not succeed and
3223          * the watchdog hit us again. We will know this if the last reset
3224          * request did not occur very recently (watchdog timer = 5*HZ, so
3225          * check after a sufficiently large time, say 4*5*HZ). In case of a
3226          * new request we reset the "reset level" to PF reset; a repeat of
3227          * the most recent request is throttled and not allowed within 3*HZ.
3228          */
3229         if (!handle)
3230                 handle = &hdev->vport[0].nic;
3231
3232         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3233                 return;
3234         else if (hdev->default_reset_request)
3235                 hdev->reset_level =
3236                         hclge_get_reset_level(hdev,
3237                                               &hdev->default_reset_request);
3238         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3239                 hdev->reset_level = HNAE3_FUNC_RESET;
3240
3241         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3242                  hdev->reset_level);
3243
3244         /* request reset & schedule reset task */
3245         set_bit(hdev->reset_level, &hdev->reset_request);
3246         hclge_reset_task_schedule(hdev);
3247
3248         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3249                 hdev->reset_level++;
3250 }
3251
3252 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3253                                         enum hnae3_reset_type rst_type)
3254 {
3255         struct hclge_dev *hdev = ae_dev->priv;
3256
3257         set_bit(rst_type, &hdev->default_reset_request);
3258 }
3259
3260 static void hclge_reset_timer(struct timer_list *t)
3261 {
3262         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3263
3264         dev_info(&hdev->pdev->dev,
3265                  "triggering global reset in reset timer\n");
3266         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3267         hclge_reset_event(hdev->pdev, NULL);
3268 }
3269
3270 static void hclge_reset_subtask(struct hclge_dev *hdev)
3271 {
3272         /* Check if there is any ongoing reset in the hardware. This status
3273          * can be checked from reset_pending. If there is one, we need to
3274          * wait for the hardware to complete the reset.
3275          *    a. If we are able to figure out in reasonable time that the
3276          *       hardware has fully reset, we can proceed with the driver and
3277          *       client reset.
3278          *    b. Otherwise, we can come back later to check this status, so
3279          *       re-schedule now.
3280          */
3281         hdev->last_reset_time = jiffies;
3282         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3283         if (hdev->reset_type != HNAE3_NONE_RESET)
3284                 hclge_reset(hdev);
3285
3286         /* check if we got any *new* reset requests to be honored */
3287         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3288         if (hdev->reset_type != HNAE3_NONE_RESET)
3289                 hclge_do_reset(hdev);
3290
3291         hdev->reset_type = HNAE3_NONE_RESET;
3292 }
3293
3294 static void hclge_reset_service_task(struct work_struct *work)
3295 {
3296         struct hclge_dev *hdev =
3297                 container_of(work, struct hclge_dev, rst_service_task);
3298
3299         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3300                 return;
3301
3302         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3303
3304         hclge_reset_subtask(hdev);
3305
3306         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3307 }
3308
3309 static void hclge_mailbox_service_task(struct work_struct *work)
3310 {
3311         struct hclge_dev *hdev =
3312                 container_of(work, struct hclge_dev, mbx_service_task);
3313
3314         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3315                 return;
3316
3317         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3318
3319         hclge_mbx_handler(hdev);
3320
3321         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3322 }
3323
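/* A VF is considered dead if it has shown no activity for more than
 * 8 seconds; once dead, its MPS is restored to the default frame size.
 */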
3324 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3325 {
3326         int i;
3327
3328         /* start from vport 1 since the PF is always alive */
3329         for (i = 1; i < hdev->num_alloc_vport; i++) {
3330                 struct hclge_vport *vport = &hdev->vport[i];
3331
3332                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3333                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3334
3335                 /* If vf is not alive, set to default value */
3336                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3337                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3338         }
3339 }
3340
3341 static void hclge_service_task(struct work_struct *work)
3342 {
3343         struct hclge_dev *hdev =
3344                 container_of(work, struct hclge_dev, service_task);
3345
3346         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3347                 hclge_update_stats_for_all(hdev);
3348                 hdev->hw_stats.stats_timer = 0;
3349         }
3350
3351         hclge_update_port_info(hdev);
3352         hclge_update_link_status(hdev);
3353         hclge_update_vport_alive(hdev);
3354         hclge_service_complete(hdev);
3355 }
3356
3357 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3358 {
3359         /* VF handle has no client */
3360         if (!handle->client)
3361                 return container_of(handle, struct hclge_vport, nic);
3362         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3363                 return container_of(handle, struct hclge_vport, roce);
3364         else
3365                 return container_of(handle, struct hclge_vport, nic);
3366 }
3367
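/* Allocate up to vector_num unused MSI-X vectors for this vport, filling in
 * the irq number and interrupt register address of each. Returns the number
 * of vectors actually allocated.
 */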
3368 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3369                             struct hnae3_vector_info *vector_info)
3370 {
3371         struct hclge_vport *vport = hclge_get_vport(handle);
3372         struct hnae3_vector_info *vector = vector_info;
3373         struct hclge_dev *hdev = vport->back;
3374         int alloc = 0;
3375         int i, j;
3376
3377         vector_num = min(hdev->num_msi_left, vector_num);
3378
3379         for (j = 0; j < vector_num; j++) {
3380                 for (i = 1; i < hdev->num_msi; i++) {
3381                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3382                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3383                                 vector->io_addr = hdev->hw.io_base +
3384                                         HCLGE_VECTOR_REG_BASE +
3385                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3386                                         vport->vport_id *
3387                                         HCLGE_VECTOR_VF_OFFSET;
3388                                 hdev->vector_status[i] = vport->vport_id;
3389                                 hdev->vector_irq[i] = vector->vector;
3390
3391                                 vector++;
3392                                 alloc++;
3393
3394                                 break;
3395                         }
3396                 }
3397         }
3398         hdev->num_msi_left -= alloc;
3399         hdev->num_msi_used += alloc;
3400
3401         return alloc;
3402 }
3403
3404 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3405 {
3406         int i;
3407
3408         for (i = 0; i < hdev->num_msi; i++)
3409                 if (vector == hdev->vector_irq[i])
3410                         return i;
3411
3412         return -EINVAL;
3413 }
3414
3415 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3416 {
3417         struct hclge_vport *vport = hclge_get_vport(handle);
3418         struct hclge_dev *hdev = vport->back;
3419         int vector_id;
3420
3421         vector_id = hclge_get_vector_index(hdev, vector);
3422         if (vector_id < 0) {
3423                 dev_err(&hdev->pdev->dev,
3424                         "Get vector index fail. vector_id =%d\n", vector_id);
3425                 return vector_id;
3426         }
3427
3428         hclge_free_vector(hdev, vector_id);
3429
3430         return 0;
3431 }
3432
3433 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3434 {
3435         return HCLGE_RSS_KEY_SIZE;
3436 }
3437
3438 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3439 {
3440         return HCLGE_RSS_IND_TBL_SIZE;
3441 }
3442
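/* Configure the RSS hash algorithm and hash key. The key does not fit in a
 * single descriptor, so it is written with three commands carrying
 * HCLGE_RSS_HASH_KEY_NUM bytes each (the last one holds the remainder).
 */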
3443 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3444                                   const u8 hfunc, const u8 *key)
3445 {
3446         struct hclge_rss_config_cmd *req;
3447         struct hclge_desc desc;
3448         int key_offset;
3449         int key_size;
3450         int ret;
3451
3452         req = (struct hclge_rss_config_cmd *)desc.data;
3453
3454         for (key_offset = 0; key_offset < 3; key_offset++) {
3455                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3456                                            false);
3457
3458                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3459                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3460
3461                 if (key_offset == 2)
3462                         key_size = HCLGE_RSS_KEY_SIZE -
3463                                    HCLGE_RSS_HASH_KEY_NUM * 2;
3464                 else
3465                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3466
3467                 memcpy(req->hash_key,
3468                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3469
3470                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3471                 if (ret) {
3472                         dev_err(&hdev->pdev->dev,
3473                                 "Configure RSS config fail, status = %d\n",
3474                                 ret);
3475                         return ret;
3476                 }
3477         }
3478         return 0;
3479 }
3480
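/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */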
3481 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3482 {
3483         struct hclge_rss_indirection_table_cmd *req;
3484         struct hclge_desc desc;
3485         int i, j;
3486         int ret;
3487
3488         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3489
3490         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3491                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3492                                            false);
3493
3494                 req->start_table_index =
3495                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3496                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3497
3498                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3499                         req->rss_result[j] =
3500                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3501
3502                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3503                 if (ret) {
3504                         dev_err(&hdev->pdev->dev,
3505                                 "Configure rss indir table fail, status = %d\n",
3506                                 ret);
3507                         return ret;
3508                 }
3509         }
3510         return 0;
3511 }
3512
3513 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3514                                  u16 *tc_size, u16 *tc_offset)
3515 {
3516         struct hclge_rss_tc_mode_cmd *req;
3517         struct hclge_desc desc;
3518         int ret;
3519         int i;
3520
3521         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3522         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3523
3524         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3525                 u16 mode = 0;
3526
3527                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3528                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3529                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3530                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3531                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3532
3533                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3534         }
3535
3536         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3537         if (ret)
3538                 dev_err(&hdev->pdev->dev,
3539                         "Configure rss tc mode fail, status = %d\n", ret);
3540
3541         return ret;
3542 }
3543
3544 static void hclge_get_rss_type(struct hclge_vport *vport)
3545 {
3546         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3547             vport->rss_tuple_sets.ipv4_udp_en ||
3548             vport->rss_tuple_sets.ipv4_sctp_en ||
3549             vport->rss_tuple_sets.ipv6_tcp_en ||
3550             vport->rss_tuple_sets.ipv6_udp_en ||
3551             vport->rss_tuple_sets.ipv6_sctp_en)
3552                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3553         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3554                  vport->rss_tuple_sets.ipv6_fragment_en)
3555                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3556         else
3557                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3558 }
3559
3560 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3561 {
3562         struct hclge_rss_input_tuple_cmd *req;
3563         struct hclge_desc desc;
3564         int ret;
3565
3566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3567
3568         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3569
3570         /* Get the tuple cfg from pf */
3571         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3572         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3573         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3574         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3575         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3576         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3577         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3578         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3579         hclge_get_rss_type(&hdev->vport[0]);
3580         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3581         if (ret)
3582                 dev_err(&hdev->pdev->dev,
3583                         "Configure rss input fail, status = %d\n", ret);
3584         return ret;
3585 }
3586
3587 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3588                          u8 *key, u8 *hfunc)
3589 {
3590         struct hclge_vport *vport = hclge_get_vport(handle);
3591         int i;
3592
3593         /* Get hash algorithm */
3594         if (hfunc) {
3595                 switch (vport->rss_algo) {
3596                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3597                         *hfunc = ETH_RSS_HASH_TOP;
3598                         break;
3599                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3600                         *hfunc = ETH_RSS_HASH_XOR;
3601                         break;
3602                 default:
3603                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3604                         break;
3605                 }
3606         }
3607
3608         /* Get the RSS Key required by the user */
3609         if (key)
3610                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3611
3612         /* Get indirect table */
3613         if (indir)
3614                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3615                         indir[i] =  vport->rss_indirection_tbl[i];
3616
3617         return 0;
3618 }
3619
3620 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3621                          const  u8 *key, const  u8 hfunc)
3622 {
3623         struct hclge_vport *vport = hclge_get_vport(handle);
3624         struct hclge_dev *hdev = vport->back;
3625         u8 hash_algo;
3626         int ret, i;
3627
3628         /* Set the RSS Hash Key if specified by the user */
3629         if (key) {
3630                 switch (hfunc) {
3631                 case ETH_RSS_HASH_TOP:
3632                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3633                         break;
3634                 case ETH_RSS_HASH_XOR:
3635                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3636                         break;
3637                 case ETH_RSS_HASH_NO_CHANGE:
3638                         hash_algo = vport->rss_algo;
3639                         break;
3640                 default:
3641                         return -EINVAL;
3642                 }
3643
3644                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3645                 if (ret)
3646                         return ret;
3647
3648                 /* Update the shadow RSS key with the user specified key */
3649                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3650                 vport->rss_algo = hash_algo;
3651         }
3652
3653         /* Update the shadow RSS table with user specified qids */
3654         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3655                 vport->rss_indirection_tbl[i] = indir[i];
3656
3657         /* Update the hardware */
3658         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3659 }
3660
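/* Translate the ethtool RXH_* hash fields into the driver's tuple bits:
 * source/destination IP and L4 port, plus the verification tag for SCTP.
 */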
3661 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3662 {
3663         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3664
3665         if (nfc->data & RXH_L4_B_2_3)
3666                 hash_sets |= HCLGE_D_PORT_BIT;
3667         else
3668                 hash_sets &= ~HCLGE_D_PORT_BIT;
3669
3670         if (nfc->data & RXH_IP_SRC)
3671                 hash_sets |= HCLGE_S_IP_BIT;
3672         else
3673                 hash_sets &= ~HCLGE_S_IP_BIT;
3674
3675         if (nfc->data & RXH_IP_DST)
3676                 hash_sets |= HCLGE_D_IP_BIT;
3677         else
3678                 hash_sets &= ~HCLGE_D_IP_BIT;
3679
3680         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3681                 hash_sets |= HCLGE_V_TAG_BIT;
3682
3683         return hash_sets;
3684 }
3685
3686 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3687                                struct ethtool_rxnfc *nfc)
3688 {
3689         struct hclge_vport *vport = hclge_get_vport(handle);
3690         struct hclge_dev *hdev = vport->back;
3691         struct hclge_rss_input_tuple_cmd *req;
3692         struct hclge_desc desc;
3693         u8 tuple_sets;
3694         int ret;
3695
3696         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3697                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3698                 return -EINVAL;
3699
3700         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3701         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3702
3703         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3704         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3705         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3706         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3707         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3708         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3709         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3710         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3711
3712         tuple_sets = hclge_get_rss_hash_bits(nfc);
3713         switch (nfc->flow_type) {
3714         case TCP_V4_FLOW:
3715                 req->ipv4_tcp_en = tuple_sets;
3716                 break;
3717         case TCP_V6_FLOW:
3718                 req->ipv6_tcp_en = tuple_sets;
3719                 break;
3720         case UDP_V4_FLOW:
3721                 req->ipv4_udp_en = tuple_sets;
3722                 break;
3723         case UDP_V6_FLOW:
3724                 req->ipv6_udp_en = tuple_sets;
3725                 break;
3726         case SCTP_V4_FLOW:
3727                 req->ipv4_sctp_en = tuple_sets;
3728                 break;
3729         case SCTP_V6_FLOW:
3730                 if ((nfc->data & RXH_L4_B_0_1) ||
3731                     (nfc->data & RXH_L4_B_2_3))
3732                         return -EINVAL;
3733
3734                 req->ipv6_sctp_en = tuple_sets;
3735                 break;
3736         case IPV4_FLOW:
3737                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3738                 break;
3739         case IPV6_FLOW:
3740                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3741                 break;
3742         default:
3743                 return -EINVAL;
3744         }
3745
3746         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3747         if (ret) {
3748                 dev_err(&hdev->pdev->dev,
3749                         "Set rss tuple fail, status = %d\n", ret);
3750                 return ret;
3751         }
3752
3753         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3754         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3755         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3756         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3757         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3758         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3759         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3760         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3761         hclge_get_rss_type(vport);
3762         return 0;
3763 }
3764
3765 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3766                                struct ethtool_rxnfc *nfc)
3767 {
3768         struct hclge_vport *vport = hclge_get_vport(handle);
3769         u8 tuple_sets;
3770
3771         nfc->data = 0;
3772
3773         switch (nfc->flow_type) {
3774         case TCP_V4_FLOW:
3775                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3776                 break;
3777         case UDP_V4_FLOW:
3778                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3779                 break;
3780         case TCP_V6_FLOW:
3781                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3782                 break;
3783         case UDP_V6_FLOW:
3784                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3785                 break;
3786         case SCTP_V4_FLOW:
3787                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3788                 break;
3789         case SCTP_V6_FLOW:
3790                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3791                 break;
3792         case IPV4_FLOW:
3793         case IPV6_FLOW:
3794                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3795                 break;
3796         default:
3797                 return -EINVAL;
3798         }
3799
3800         if (!tuple_sets)
3801                 return 0;
3802
3803         if (tuple_sets & HCLGE_D_PORT_BIT)
3804                 nfc->data |= RXH_L4_B_2_3;
3805         if (tuple_sets & HCLGE_S_PORT_BIT)
3806                 nfc->data |= RXH_L4_B_0_1;
3807         if (tuple_sets & HCLGE_D_IP_BIT)
3808                 nfc->data |= RXH_IP_DST;
3809         if (tuple_sets & HCLGE_S_IP_BIT)
3810                 nfc->data |= RXH_IP_SRC;
3811
3812         return 0;
3813 }
3814
3815 static int hclge_get_tc_size(struct hnae3_handle *handle)
3816 {
3817         struct hclge_vport *vport = hclge_get_vport(handle);
3818         struct hclge_dev *hdev = vport->back;
3819
3820         return hdev->rss_size_max;
3821 }
3822
3823 int hclge_rss_init_hw(struct hclge_dev *hdev)
3824 {
3825         struct hclge_vport *vport = hdev->vport;
3826         u8 *rss_indir = vport[0].rss_indirection_tbl;
3827         u16 rss_size = vport[0].alloc_rss_size;
3828         u8 *key = vport[0].rss_hash_key;
3829         u8 hfunc = vport[0].rss_algo;
3830         u16 tc_offset[HCLGE_MAX_TC_NUM];
3831         u16 tc_valid[HCLGE_MAX_TC_NUM];
3832         u16 tc_size[HCLGE_MAX_TC_NUM];
3833         u16 roundup_size;
3834         int i, ret;
3835
3836         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3837         if (ret)
3838                 return ret;
3839
3840         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3841         if (ret)
3842                 return ret;
3843
3844         ret = hclge_set_rss_input_tuple(hdev);
3845         if (ret)
3846                 return ret;
3847
3848         /* Each TC has the same queue size, and the tc_size set to hardware
3849          * is the log2 of the roundup power of two of rss_size; the actual
3850          * queue size is limited by the indirection table.
3851          */
3852         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3853                 dev_err(&hdev->pdev->dev,
3854                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3855                         rss_size);
3856                 return -EINVAL;
3857         }
3858
3859         roundup_size = roundup_pow_of_two(rss_size);
3860         roundup_size = ilog2(roundup_size);
3861
3862         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3863                 tc_valid[i] = 0;
3864
3865                 if (!(hdev->hw_tc_map & BIT(i)))
3866                         continue;
3867
3868                 tc_valid[i] = 1;
3869                 tc_size[i] = roundup_size;
3870                 tc_offset[i] = rss_size * i;
3871         }
3872
3873         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3874 }
3875
3876 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3877 {
3878         struct hclge_vport *vport = hdev->vport;
3879         int i, j;
3880
3881         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3882                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3883                         vport[j].rss_indirection_tbl[i] =
3884                                 i % vport[j].alloc_rss_size;
3885         }
3886 }
3887
3888 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3889 {
3890         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3891         struct hclge_vport *vport = hdev->vport;
3892
3893         if (hdev->pdev->revision >= 0x21)
3894                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3895
3896         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3897                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3898                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3899                 vport[i].rss_tuple_sets.ipv4_udp_en =
3900                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3901                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3902                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3903                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3904                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3905                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3906                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3907                 vport[i].rss_tuple_sets.ipv6_udp_en =
3908                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3909                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3910                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3911                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3912                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3913
3914                 vport[i].rss_algo = rss_algo;
3915
3916                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3917                        HCLGE_RSS_KEY_SIZE);
3918         }
3919
3920         hclge_rss_indir_init_cfg(hdev);
3921 }
3922
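/* Map (en == true) or unmap the rings in ring_chain to/from the interrupt
 * vector vector_id. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries are
 * packed into each command sent to the firmware.
 */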
3923 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3924                                 int vector_id, bool en,
3925                                 struct hnae3_ring_chain_node *ring_chain)
3926 {
3927         struct hclge_dev *hdev = vport->back;
3928         struct hnae3_ring_chain_node *node;
3929         struct hclge_desc desc;
3930         struct hclge_ctrl_vector_chain_cmd *req
3931                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3932         enum hclge_cmd_status status;
3933         enum hclge_opcode_type op;
3934         u16 tqp_type_and_id;
3935         int i;
3936
3937         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3938         hclge_cmd_setup_basic_desc(&desc, op, false);
3939         req->int_vector_id = vector_id;
3940
3941         i = 0;
3942         for (node = ring_chain; node; node = node->next) {
3943                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3944                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3945                                 HCLGE_INT_TYPE_S,
3946                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3947                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3948                                 HCLGE_TQP_ID_S, node->tqp_index);
3949                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3950                                 HCLGE_INT_GL_IDX_S,
3951                                 hnae3_get_field(node->int_gl_idx,
3952                                                 HNAE3_RING_GL_IDX_M,
3953                                                 HNAE3_RING_GL_IDX_S));
3954                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3955                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3956                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3957                         req->vfid = vport->vport_id;
3958
3959                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3960                         if (status) {
3961                                 dev_err(&hdev->pdev->dev,
3962                                         "Map TQP fail, status is %d.\n",
3963                                         status);
3964                                 return -EIO;
3965                         }
3966                         i = 0;
3967
3968                         hclge_cmd_setup_basic_desc(&desc, op, false);
3971                         req->int_vector_id = vector_id;
3972                 }
3973         }
3974
3975         if (i > 0) {
3976                 req->int_cause_num = i;
3977                 req->vfid = vport->vport_id;
3978                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3979                 if (status) {
3980                         dev_err(&hdev->pdev->dev,
3981                                 "Map TQP fail, status is %d.\n", status);
3982                         return -EIO;
3983                 }
3984         }
3985
3986         return 0;
3987 }
3988
3989 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3990                                     int vector,
3991                                     struct hnae3_ring_chain_node *ring_chain)
3992 {
3993         struct hclge_vport *vport = hclge_get_vport(handle);
3994         struct hclge_dev *hdev = vport->back;
3995         int vector_id;
3996
3997         vector_id = hclge_get_vector_index(hdev, vector);
3998         if (vector_id < 0) {
3999                 dev_err(&hdev->pdev->dev,
4000                         "Get vector index fail. vector_id =%d\n", vector_id);
4001                 return vector_id;
4002         }
4003
4004         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4005 }
4006
4007 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4008                                        int vector,
4009                                        struct hnae3_ring_chain_node *ring_chain)
4010 {
4011         struct hclge_vport *vport = hclge_get_vport(handle);
4012         struct hclge_dev *hdev = vport->back;
4013         int vector_id, ret;
4014
4015         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4016                 return 0;
4017
4018         vector_id = hclge_get_vector_index(hdev, vector);
4019         if (vector_id < 0) {
4020                 dev_err(&handle->pdev->dev,
4021                         "Get vector index fail. ret =%d\n", vector_id);
4022                 return vector_id;
4023         }
4024
4025         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4026         if (ret)
4027                 dev_err(&handle->pdev->dev,
4028                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4029                         vector_id,
4030                         ret);
4031
4032         return ret;
4033 }
4034
4035 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4036                                struct hclge_promisc_param *param)
4037 {
4038         struct hclge_promisc_cfg_cmd *req;
4039         struct hclge_desc desc;
4040         int ret;
4041
4042         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4043
4044         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4045         req->vf_id = param->vf_id;
4046
4047         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4048          * on pdev revision 0x20; newer revisions support them. Setting these
4049          * two fields does not return an error when the driver sends the
4050          * command to the firmware on revision 0x20.
4051          */
4052         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4053                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4054
4055         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4056         if (ret)
4057                 dev_err(&hdev->pdev->dev,
4058                         "Set promisc mode fail, status is %d.\n", ret);
4059
4060         return ret;
4061 }
4062
4063 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4064                               bool en_mc, bool en_bc, int vport_id)
4065 {
4066         if (!param)
4067                 return;
4068
4069         memset(param, 0, sizeof(struct hclge_promisc_param));
4070         if (en_uc)
4071                 param->enable = HCLGE_PROMISC_EN_UC;
4072         if (en_mc)
4073                 param->enable |= HCLGE_PROMISC_EN_MC;
4074         if (en_bc)
4075                 param->enable |= HCLGE_PROMISC_EN_BC;
4076         param->vf_id = vport_id;
4077 }
4078
4079 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4080                                   bool en_mc_pmc)
4081 {
4082         struct hclge_vport *vport = hclge_get_vport(handle);
4083         struct hclge_dev *hdev = vport->back;
4084         struct hclge_promisc_param param;
4085         bool en_bc_pmc = true;
4086
4087         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4088          * is always bypassed. So broadcast promisc should be disabled until
4089          * the user enables promisc mode.
4090          */
4091         if (handle->pdev->revision == 0x20)
4092                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4093
4094         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4095                                  vport->vport_id);
4096         return hclge_cmd_set_promisc_mode(hdev, &param);
4097 }
4098
4099 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4100 {
4101         struct hclge_get_fd_mode_cmd *req;
4102         struct hclge_desc desc;
4103         int ret;
4104
4105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4106
4107         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4108
4109         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4110         if (ret) {
4111                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4112                 return ret;
4113         }
4114
4115         *fd_mode = req->mode;
4116
4117         return ret;
4118 }
4119
4120 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4121                                    u32 *stage1_entry_num,
4122                                    u32 *stage2_entry_num,
4123                                    u16 *stage1_counter_num,
4124                                    u16 *stage2_counter_num)
4125 {
4126         struct hclge_get_fd_allocation_cmd *req;
4127         struct hclge_desc desc;
4128         int ret;
4129
4130         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4131
4132         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4133
4134         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4135         if (ret) {
4136                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4137                         ret);
4138                 return ret;
4139         }
4140
4141         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4142         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4143         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4144         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4145
4146         return ret;
4147 }
4148
4149 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4150 {
4151         struct hclge_set_fd_key_config_cmd *req;
4152         struct hclge_fd_key_cfg *stage;
4153         struct hclge_desc desc;
4154         int ret;
4155
4156         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4157
4158         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4159         stage = &hdev->fd_cfg.key_cfg[stage_num];
4160         req->stage = stage_num;
4161         req->key_select = stage->key_sel;
4162         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4163         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4164         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4165         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4166         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4167         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4168
4169         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4170         if (ret)
4171                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4172
4173         return ret;
4174 }
4175
4176 static int hclge_init_fd_config(struct hclge_dev *hdev)
4177 {
4178 #define LOW_2_WORDS             0x03
4179         struct hclge_fd_key_cfg *key_cfg;
4180         int ret;
4181
4182         if (!hnae3_dev_fd_supported(hdev))
4183                 return 0;
4184
4185         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4186         if (ret)
4187                 return ret;
4188
4189         switch (hdev->fd_cfg.fd_mode) {
4190         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4191                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4192                 break;
4193         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4194                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4195                 break;
4196         default:
4197                 dev_err(&hdev->pdev->dev,
4198                         "Unsupported flow director mode %d\n",
4199                         hdev->fd_cfg.fd_mode);
4200                 return -EOPNOTSUPP;
4201         }
4202
4203         hdev->fd_cfg.proto_support =
4204                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4205                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4206         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4207         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4208         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4209         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4210         key_cfg->outer_sipv6_word_en = 0;
4211         key_cfg->outer_dipv6_word_en = 0;
4212
4213         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4214                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4215                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4216                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4217
4218         /* If the max 400bit key is used, we can support tuples for ether type */
4219         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4220                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4221                 key_cfg->tuple_active |=
4222                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4223         }
4224
4225         /* roce_type is used to filter roce frames
4226          * dst_vport is used to specify the rule
4227          */
4228         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4229
4230         ret = hclge_get_fd_allocation(hdev,
4231                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4232                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4233                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4234                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4235         if (ret)
4236                 return ret;
4237
4238         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4239 }
4240
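     /* Write one TCAM entry (the x or y half of a key, selected by @sel_x)
      * at @loc using three chained descriptors. A NULL @key leaves the key
      * data zeroed, and @is_add sets the entry valid bit on the x write.
      */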
4241 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4242                                 int loc, u8 *key, bool is_add)
4243 {
4244         struct hclge_fd_tcam_config_1_cmd *req1;
4245         struct hclge_fd_tcam_config_2_cmd *req2;
4246         struct hclge_fd_tcam_config_3_cmd *req3;
4247         struct hclge_desc desc[3];
4248         int ret;
4249
4250         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4251         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4252         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4253         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4254         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4255
4256         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4257         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4258         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4259
4260         req1->stage = stage;
4261         req1->xy_sel = sel_x ? 1 : 0;
4262         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4263         req1->index = cpu_to_le32(loc);
4264         req1->entry_vld = sel_x ? is_add : 0;
4265
4266         if (key) {
4267                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4268                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4269                        sizeof(req2->tcam_data));
4270                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4271                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4272         }
4273
4274         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4275         if (ret)
4276                 dev_err(&hdev->pdev->dev,
4277                         "config tcam key fail, ret=%d\n",
4278                         ret);
4279
4280         return ret;
4281 }
4282
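     /* Build the 64-bit action data for the rule at @loc: drop or forward to
      * a queue, optional counter, next-stage key, and the rule id to be
      * written back to the RX buffer descriptor.
      */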
4283 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4284                               struct hclge_fd_ad_data *action)
4285 {
4286         struct hclge_fd_ad_config_cmd *req;
4287         struct hclge_desc desc;
4288         u64 ad_data = 0;
4289         int ret;
4290
4291         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4292
4293         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4294         req->index = cpu_to_le32(loc);
4295         req->stage = stage;
4296
4297         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4298                       action->write_rule_id_to_bd);
4299         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4300                         action->rule_id);
4301         ad_data <<= 32;
4302         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4303         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4304                       action->forward_to_direct_queue);
4305         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4306                         action->queue_id);
4307         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4308         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4309                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4310         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4311         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4312                         action->counter_id);
4313
4314         req->ad_data = cpu_to_le64(ad_data);
4315         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4316         if (ret)
4317                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4318
4319         return ret;
4320 }
4321
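     /* Convert one tuple of @rule into TCAM x/y key bytes via calc_x/calc_y.
      * Returns true when the tuple occupies space in the key (even if it is
      * unused and left as zero) so the caller advances the key cursor, and
      * false for tuple bits this function does not handle.
      */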
4322 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4323                                    struct hclge_fd_rule *rule)
4324 {
4325         u16 tmp_x_s, tmp_y_s;
4326         u32 tmp_x_l, tmp_y_l;
4327         int i;
4328
4329         if (rule->unused_tuple & tuple_bit)
4330                 return true;
4331
4332         switch (tuple_bit) {
4333         case 0:
4334                 return false;
4335         case BIT(INNER_DST_MAC):
4336                 for (i = 0; i < 6; i++) {
4337                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4338                                rule->tuples_mask.dst_mac[i]);
4339                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4340                                rule->tuples_mask.dst_mac[i]);
4341                 }
4342
4343                 return true;
4344         case BIT(INNER_SRC_MAC):
4345                 for (i = 0; i < 6; i++) {
4346                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4347                                rule->tuples_mask.src_mac[i]);
4348                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4349                                rule->tuples_mask.src_mac[i]);
4350                 }
4351
4352                 return true;
4353         case BIT(INNER_VLAN_TAG_FST):
4354                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4355                        rule->tuples_mask.vlan_tag1);
4356                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4357                        rule->tuples_mask.vlan_tag1);
4358                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4359                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4360
4361                 return true;
4362         case BIT(INNER_ETH_TYPE):
4363                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4364                        rule->tuples_mask.ether_proto);
4365                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4366                        rule->tuples_mask.ether_proto);
4367                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4368                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4369
4370                 return true;
4371         case BIT(INNER_IP_TOS):
4372                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4373                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4374
4375                 return true;
4376         case BIT(INNER_IP_PROTO):
4377                 calc_x(*key_x, rule->tuples.ip_proto,
4378                        rule->tuples_mask.ip_proto);
4379                 calc_y(*key_y, rule->tuples.ip_proto,
4380                        rule->tuples_mask.ip_proto);
4381
4382                 return true;
4383         case BIT(INNER_SRC_IP):
4384                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4385                        rule->tuples_mask.src_ip[3]);
4386                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4387                        rule->tuples_mask.src_ip[3]);
4388                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4389                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4390
4391                 return true;
4392         case BIT(INNER_DST_IP):
4393                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4394                        rule->tuples_mask.dst_ip[3]);
4395                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4396                        rule->tuples_mask.dst_ip[3]);
4397                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4398                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4399
4400                 return true;
4401         case BIT(INNER_SRC_PORT):
4402                 calc_x(tmp_x_s, rule->tuples.src_port,
4403                        rule->tuples_mask.src_port);
4404                 calc_y(tmp_y_s, rule->tuples.src_port,
4405                        rule->tuples_mask.src_port);
4406                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4407                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4408
4409                 return true;
4410         case BIT(INNER_DST_PORT):
4411                 calc_x(tmp_x_s, rule->tuples.dst_port,
4412                        rule->tuples_mask.dst_port);
4413                 calc_y(tmp_y_s, rule->tuples.dst_port,
4414                        rule->tuples_mask.dst_port);
4415                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4416                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4417
4418                 return true;
4419         default:
4420                 return false;
4421         }
4422 }
4423
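     /* Encode a port number for the meta data: pf/vf id for a host port, or
      * the network port id for a network port, plus the port type bit.
      */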
4424 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4425                                  u8 vf_id, u8 network_port_id)
4426 {
4427         u32 port_number = 0;
4428
4429         if (port_type == HOST_PORT) {
4430                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4431                                 pf_id);
4432                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4433                                 vf_id);
4434                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4435         } else {
4436                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4437                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4438                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4439         }
4440
4441         return port_number;
4442 }
4443
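     /* Pack the active meta data fields (packet type, destination vport)
      * into a 32-bit word, convert it to x/y form and left-shift it so the
      * used bits sit at the MSB end of the meta data region.
      */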
4444 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4445                                        __le32 *key_x, __le32 *key_y,
4446                                        struct hclge_fd_rule *rule)
4447 {
4448         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4449         u8 cur_pos = 0, tuple_size, shift_bits;
4450         int i;
4451
4452         for (i = 0; i < MAX_META_DATA; i++) {
4453                 tuple_size = meta_data_key_info[i].key_length;
4454                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4455
4456                 switch (tuple_bit) {
4457                 case BIT(ROCE_TYPE):
4458                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4459                         cur_pos += tuple_size;
4460                         break;
4461                 case BIT(DST_VPORT):
4462                         port_number = hclge_get_port_number(HOST_PORT, 0,
4463                                                             rule->vf_id, 0);
4464                         hnae3_set_field(meta_data,
4465                                         GENMASK(cur_pos + tuple_size, cur_pos),
4466                                         cur_pos, port_number);
4467                         cur_pos += tuple_size;
4468                         break;
4469                 default:
4470                         break;
4471                 }
4472         }
4473
4474         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4475         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4476         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4477
4478         *key_x = cpu_to_le32(tmp_x << shift_bits);
4479         *key_y = cpu_to_le32(tmp_y << shift_bits);
4480 }
4481
4482 /* A complete key consists of a meta data key and a tuple key.
4483  * The meta data key is stored in the MSB region and the tuple key in the
4484  * LSB region; unused bits are filled with 0.
4485  */
4486 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4487                             struct hclge_fd_rule *rule)
4488 {
4489         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4490         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4491         u8 *cur_key_x, *cur_key_y;
4492         int i, ret, tuple_size;
4493         u8 meta_data_region;
4494
4495         memset(key_x, 0, sizeof(key_x));
4496         memset(key_y, 0, sizeof(key_y));
4497         cur_key_x = key_x;
4498         cur_key_y = key_y;
4499
4500         for (i = 0; i < MAX_TUPLE; i++) {
4501                 bool tuple_valid;
4502                 u32 check_tuple;
4503
4504                 tuple_size = tuple_key_info[i].key_length / 8;
4505                 check_tuple = key_cfg->tuple_active & BIT(i);
4506
4507                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4508                                                      cur_key_y, rule);
4509                 if (tuple_valid) {
4510                         cur_key_x += tuple_size;
4511                         cur_key_y += tuple_size;
4512                 }
4513         }
4514
4515         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4516                         MAX_META_DATA_LENGTH / 8;
4517
4518         hclge_fd_convert_meta_data(key_cfg,
4519                                    (__le32 *)(key_x + meta_data_region),
4520                                    (__le32 *)(key_y + meta_data_region),
4521                                    rule);
4522
4523         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4524                                    true);
4525         if (ret) {
4526                 dev_err(&hdev->pdev->dev,
4527                         "fd key_y config fail, loc=%d, ret=%d\n",
4528                         rule->location, ret);
4529                 return ret;
4530         }
4531
4532         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4533                                    true);
4534         if (ret)
4535                 dev_err(&hdev->pdev->dev,
4536                         "fd key_x config fail, loc=%d, ret=%d\n",
4537                         rule->location, ret);
4538         return ret;
4539 }
4540
4541 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4542                                struct hclge_fd_rule *rule)
4543 {
4544         struct hclge_fd_ad_data ad_data;
4545
4546         ad_data.ad_id = rule->location;
4547
4548         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4549                 ad_data.drop_packet = true;
4550                 ad_data.forward_to_direct_queue = false;
4551                 ad_data.queue_id = 0;
4552         } else {
4553                 ad_data.drop_packet = false;
4554                 ad_data.forward_to_direct_queue = true;
4555                 ad_data.queue_id = rule->queue_id;
4556         }
4557
4558         ad_data.use_counter = false;
4559         ad_data.counter_id = 0;
4560
4561         ad_data.use_next_stage = false;
4562         ad_data.next_input_key = 0;
4563
4564         ad_data.write_rule_id_to_bd = true;
4565         ad_data.rule_id = rule->location;
4566
4567         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4568 }
4569
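     /* Validate an ethtool flow spec against the supported flow types and
      * rule capacity, and mark every tuple the spec does not use in @unused.
      */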
4570 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4571                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4572 {
4573         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4574         struct ethtool_usrip4_spec *usr_ip4_spec;
4575         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4576         struct ethtool_usrip6_spec *usr_ip6_spec;
4577         struct ethhdr *ether_spec;
4578
4579         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4580                 return -EINVAL;
4581
4582         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4583                 return -EOPNOTSUPP;
4584
4585         if ((fs->flow_type & FLOW_EXT) &&
4586             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4587                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4588                 return -EOPNOTSUPP;
4589         }
4590
4591         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4592         case SCTP_V4_FLOW:
4593         case TCP_V4_FLOW:
4594         case UDP_V4_FLOW:
4595                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4596                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4597
4598                 if (!tcp_ip4_spec->ip4src)
4599                         *unused |= BIT(INNER_SRC_IP);
4600
4601                 if (!tcp_ip4_spec->ip4dst)
4602                         *unused |= BIT(INNER_DST_IP);
4603
4604                 if (!tcp_ip4_spec->psrc)
4605                         *unused |= BIT(INNER_SRC_PORT);
4606
4607                 if (!tcp_ip4_spec->pdst)
4608                         *unused |= BIT(INNER_DST_PORT);
4609
4610                 if (!tcp_ip4_spec->tos)
4611                         *unused |= BIT(INNER_IP_TOS);
4612
4613                 break;
4614         case IP_USER_FLOW:
4615                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4616                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4617                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4618
4619                 if (!usr_ip4_spec->ip4src)
4620                         *unused |= BIT(INNER_SRC_IP);
4621
4622                 if (!usr_ip4_spec->ip4dst)
4623                         *unused |= BIT(INNER_DST_IP);
4624
4625                 if (!usr_ip4_spec->tos)
4626                         *unused |= BIT(INNER_IP_TOS);
4627
4628                 if (!usr_ip4_spec->proto)
4629                         *unused |= BIT(INNER_IP_PROTO);
4630
4631                 if (usr_ip4_spec->l4_4_bytes)
4632                         return -EOPNOTSUPP;
4633
4634                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4635                         return -EOPNOTSUPP;
4636
4637                 break;
4638         case SCTP_V6_FLOW:
4639         case TCP_V6_FLOW:
4640         case UDP_V6_FLOW:
4641                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4642                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4643                         BIT(INNER_IP_TOS);
4644
4645                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4646                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4647                         *unused |= BIT(INNER_SRC_IP);
4648
4649                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4650                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4651                         *unused |= BIT(INNER_DST_IP);
4652
4653                 if (!tcp_ip6_spec->psrc)
4654                         *unused |= BIT(INNER_SRC_PORT);
4655
4656                 if (!tcp_ip6_spec->pdst)
4657                         *unused |= BIT(INNER_DST_PORT);
4658
4659                 if (tcp_ip6_spec->tclass)
4660                         return -EOPNOTSUPP;
4661
4662                 break;
4663         case IPV6_USER_FLOW:
4664                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4665                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4666                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4667                         BIT(INNER_DST_PORT);
4668
4669                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4670                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4671                         *unused |= BIT(INNER_SRC_IP);
4672
4673                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4674                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4675                         *unused |= BIT(INNER_DST_IP);
4676
4677                 if (!usr_ip6_spec->l4_proto)
4678                         *unused |= BIT(INNER_IP_PROTO);
4679
4680                 if (usr_ip6_spec->tclass)
4681                         return -EOPNOTSUPP;
4682
4683                 if (usr_ip6_spec->l4_4_bytes)
4684                         return -EOPNOTSUPP;
4685
4686                 break;
4687         case ETHER_FLOW:
4688                 ether_spec = &fs->h_u.ether_spec;
4689                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4690                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4691                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4692
4693                 if (is_zero_ether_addr(ether_spec->h_source))
4694                         *unused |= BIT(INNER_SRC_MAC);
4695
4696                 if (is_zero_ether_addr(ether_spec->h_dest))
4697                         *unused |= BIT(INNER_DST_MAC);
4698
4699                 if (!ether_spec->h_proto)
4700                         *unused |= BIT(INNER_ETH_TYPE);
4701
4702                 break;
4703         default:
4704                 return -EOPNOTSUPP;
4705         }
4706
4707         if ((fs->flow_type & FLOW_EXT)) {
4708                 if (fs->h_ext.vlan_etype)
4709                         return -EOPNOTSUPP;
4710                 if (!fs->h_ext.vlan_tci)
4711                         *unused |= BIT(INNER_VLAN_TAG_FST);
4712
4713                 if (fs->m_ext.vlan_tci) {
4714                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4715                                 return -EINVAL;
4716                 }
4717         } else {
4718                 *unused |= BIT(INNER_VLAN_TAG_FST);
4719         }
4720
4721         if (fs->flow_type & FLOW_MAC_EXT) {
4722                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4723                         return -EOPNOTSUPP;
4724
4725                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4726                         *unused |= BIT(INNER_DST_MAC);
4727                 else
4728                         *unused &= ~(BIT(INNER_DST_MAC));
4729         }
4730
4731         return 0;
4732 }
4733
4734 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4735 {
4736         struct hclge_fd_rule *rule = NULL;
4737         struct hlist_node *node2;
4738
4739         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4740                 if (rule->location >= location)
4741                         break;
4742         }
4743
4744         return rule && rule->location == location;
4745 }
4746
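     /* Insert or remove a rule in the location-sorted rule list. An existing
      * rule at @location is removed first; @new_rule is then linked in when
      * @is_add is true, and hclge_fd_rule_num is kept in sync.
      */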
4747 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4748                                      struct hclge_fd_rule *new_rule,
4749                                      u16 location,
4750                                      bool is_add)
4751 {
4752         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4753         struct hlist_node *node2;
4754
4755         if (is_add && !new_rule)
4756                 return -EINVAL;
4757
4758         hlist_for_each_entry_safe(rule, node2,
4759                                   &hdev->fd_rule_list, rule_node) {
4760                 if (rule->location >= location)
4761                         break;
4762                 parent = rule;
4763         }
4764
4765         if (rule && rule->location == location) {
4766                 hlist_del(&rule->rule_node);
4767                 kfree(rule);
4768                 hdev->hclge_fd_rule_num--;
4769
4770                 if (!is_add)
4771                         return 0;
4772
4773         } else if (!is_add) {
4774                 dev_err(&hdev->pdev->dev,
4775                         "delete fail, rule %d does not exist\n",
4776                         location);
4777                 return -EINVAL;
4778         }
4779
4780         INIT_HLIST_NODE(&new_rule->rule_node);
4781
4782         if (parent)
4783                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4784         else
4785                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4786
4787         hdev->hclge_fd_rule_num++;
4788
4789         return 0;
4790 }
4791
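     /* Translate the ethtool flow spec into the driver's tuple and tuple-mask
      * representation (host byte order), including the implied L4 protocol
      * for TCP/UDP/SCTP flow types and the optional VLAN/MAC extensions.
      */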
4792 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4793                               struct ethtool_rx_flow_spec *fs,
4794                               struct hclge_fd_rule *rule)
4795 {
4796         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4797
4798         switch (flow_type) {
4799         case SCTP_V4_FLOW:
4800         case TCP_V4_FLOW:
4801         case UDP_V4_FLOW:
4802                 rule->tuples.src_ip[3] =
4803                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4804                 rule->tuples_mask.src_ip[3] =
4805                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4806
4807                 rule->tuples.dst_ip[3] =
4808                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4809                 rule->tuples_mask.dst_ip[3] =
4810                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4811
4812                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4813                 rule->tuples_mask.src_port =
4814                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4815
4816                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4817                 rule->tuples_mask.dst_port =
4818                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4819
4820                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4821                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4822
4823                 rule->tuples.ether_proto = ETH_P_IP;
4824                 rule->tuples_mask.ether_proto = 0xFFFF;
4825
4826                 break;
4827         case IP_USER_FLOW:
4828                 rule->tuples.src_ip[3] =
4829                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4830                 rule->tuples_mask.src_ip[3] =
4831                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4832
4833                 rule->tuples.dst_ip[3] =
4834                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4835                 rule->tuples_mask.dst_ip[3] =
4836                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4837
4838                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4839                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4840
4841                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4842                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4843
4844                 rule->tuples.ether_proto = ETH_P_IP;
4845                 rule->tuples_mask.ether_proto = 0xFFFF;
4846
4847                 break;
4848         case SCTP_V6_FLOW:
4849         case TCP_V6_FLOW:
4850         case UDP_V6_FLOW:
4851                 be32_to_cpu_array(rule->tuples.src_ip,
4852                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4853                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4854                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4855
4856                 be32_to_cpu_array(rule->tuples.dst_ip,
4857                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4858                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4859                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4860
4861                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4862                 rule->tuples_mask.src_port =
4863                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4864
4865                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4866                 rule->tuples_mask.dst_port =
4867                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4868
4869                 rule->tuples.ether_proto = ETH_P_IPV6;
4870                 rule->tuples_mask.ether_proto = 0xFFFF;
4871
4872                 break;
4873         case IPV6_USER_FLOW:
4874                 be32_to_cpu_array(rule->tuples.src_ip,
4875                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4876                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4877                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4878
4879                 be32_to_cpu_array(rule->tuples.dst_ip,
4880                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4881                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4882                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4883
4884                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4885                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4886
4887                 rule->tuples.ether_proto = ETH_P_IPV6;
4888                 rule->tuples_mask.ether_proto = 0xFFFF;
4889
4890                 break;
4891         case ETHER_FLOW:
4892                 ether_addr_copy(rule->tuples.src_mac,
4893                                 fs->h_u.ether_spec.h_source);
4894                 ether_addr_copy(rule->tuples_mask.src_mac,
4895                                 fs->m_u.ether_spec.h_source);
4896
4897                 ether_addr_copy(rule->tuples.dst_mac,
4898                                 fs->h_u.ether_spec.h_dest);
4899                 ether_addr_copy(rule->tuples_mask.dst_mac,
4900                                 fs->m_u.ether_spec.h_dest);
4901
4902                 rule->tuples.ether_proto =
4903                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4904                 rule->tuples_mask.ether_proto =
4905                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4906
4907                 break;
4908         default:
4909                 return -EOPNOTSUPP;
4910         }
4911
4912         switch (flow_type) {
4913         case SCTP_V4_FLOW:
4914         case SCTP_V6_FLOW:
4915                 rule->tuples.ip_proto = IPPROTO_SCTP;
4916                 rule->tuples_mask.ip_proto = 0xFF;
4917                 break;
4918         case TCP_V4_FLOW:
4919         case TCP_V6_FLOW:
4920                 rule->tuples.ip_proto = IPPROTO_TCP;
4921                 rule->tuples_mask.ip_proto = 0xFF;
4922                 break;
4923         case UDP_V4_FLOW:
4924         case UDP_V6_FLOW:
4925                 rule->tuples.ip_proto = IPPROTO_UDP;
4926                 rule->tuples_mask.ip_proto = 0xFF;
4927                 break;
4928         default:
4929                 break;
4930         }
4931
4932         if ((fs->flow_type & FLOW_EXT)) {
4933                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4934                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4935         }
4936
4937         if (fs->flow_type & FLOW_MAC_EXT) {
4938                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4939                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4940         }
4941
4942         return 0;
4943 }
4944
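     /* Add a flow director rule described by an ethtool flow spec: check the
      * spec, resolve the destination vport and queue, program the action and
      * key into the TCAM and track the rule in the software list.
      */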
4945 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4946                               struct ethtool_rxnfc *cmd)
4947 {
4948         struct hclge_vport *vport = hclge_get_vport(handle);
4949         struct hclge_dev *hdev = vport->back;
4950         u16 dst_vport_id = 0, q_index = 0;
4951         struct ethtool_rx_flow_spec *fs;
4952         struct hclge_fd_rule *rule;
4953         u32 unused = 0;
4954         u8 action;
4955         int ret;
4956
4957         if (!hnae3_dev_fd_supported(hdev))
4958                 return -EOPNOTSUPP;
4959
4960         if (!hdev->fd_en) {
4961                 dev_warn(&hdev->pdev->dev,
4962                          "Please enable flow director first\n");
4963                 return -EOPNOTSUPP;
4964         }
4965
4966         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4967
4968         ret = hclge_fd_check_spec(hdev, fs, &unused);
4969         if (ret) {
4970                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4971                 return ret;
4972         }
4973
4974         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4975                 action = HCLGE_FD_ACTION_DROP_PACKET;
4976         } else {
4977                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4978                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4979                 u16 tqps;
4980
4981                 if (vf > hdev->num_req_vfs) {
4982                         dev_err(&hdev->pdev->dev,
4983                                 "Error: vf id (%d) > max vf num (%d)\n",
4984                                 vf, hdev->num_req_vfs);
4985                         return -EINVAL;
4986                 }
4987
4988                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4989                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4990
4991                 if (ring >= tqps) {
4992                         dev_err(&hdev->pdev->dev,
4993                                 "Error: queue id (%d) > max tqp num (%d)\n",
4994                                 ring, tqps - 1);
4995                         return -EINVAL;
4996                 }
4997
4998                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4999                 q_index = ring;
5000         }
5001
5002         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5003         if (!rule)
5004                 return -ENOMEM;
5005
5006         ret = hclge_fd_get_tuple(hdev, fs, rule);
5007         if (ret)
5008                 goto free_rule;
5009
5010         rule->flow_type = fs->flow_type;
5011
5012         rule->location = fs->location;
5013         rule->unused_tuple = unused;
5014         rule->vf_id = dst_vport_id;
5015         rule->queue_id = q_index;
5016         rule->action = action;
5017
5018         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5019         if (ret)
5020                 goto free_rule;
5021
5022         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5023         if (ret)
5024                 goto free_rule;
5025
5026         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5027         if (ret)
5028                 goto free_rule;
5029
5030         return ret;
5031
5032 free_rule:
5033         kfree(rule);
5034         return ret;
5035 }
5036
5037 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5038                               struct ethtool_rxnfc *cmd)
5039 {
5040         struct hclge_vport *vport = hclge_get_vport(handle);
5041         struct hclge_dev *hdev = vport->back;
5042         struct ethtool_rx_flow_spec *fs;
5043         int ret;
5044
5045         if (!hnae3_dev_fd_supported(hdev))
5046                 return -EOPNOTSUPP;
5047
5048         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5049
5050         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5051                 return -EINVAL;
5052
5053         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5054                 dev_err(&hdev->pdev->dev,
5055                         "Delete fail, rule %d does not exist\n",
5056                         fs->location);
5057                 return -ENOENT;
5058         }
5059
5060         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5061                                    fs->location, NULL, false);
5062         if (ret)
5063                 return ret;
5064
5065         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5066                                          false);
5067 }
5068
5069 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5070                                      bool clear_list)
5071 {
5072         struct hclge_vport *vport = hclge_get_vport(handle);
5073         struct hclge_dev *hdev = vport->back;
5074         struct hclge_fd_rule *rule;
5075         struct hlist_node *node;
5076
5077         if (!hnae3_dev_fd_supported(hdev))
5078                 return;
5079
5080         if (clear_list) {
5081                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5082                                           rule_node) {
5083                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5084                                              rule->location, NULL, false);
5085                         hlist_del(&rule->rule_node);
5086                         kfree(rule);
5087                         hdev->hclge_fd_rule_num--;
5088                 }
5089         } else {
5090                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5091                                           rule_node)
5092                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5093                                              rule->location, NULL, false);
5094         }
5095 }
5096
5097 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5098 {
5099         struct hclge_vport *vport = hclge_get_vport(handle);
5100         struct hclge_dev *hdev = vport->back;
5101         struct hclge_fd_rule *rule;
5102         struct hlist_node *node;
5103         int ret;
5104
5105         /* Return ok here, because reset error handling will check this
5106          * return value. If error is returned here, the reset process will
5107          * fail.
5108          */
5109         if (!hnae3_dev_fd_supported(hdev))
5110                 return 0;
5111
5112         /* if fd is disabled, the rules should not be restored during reset */
5113         if (!hdev->fd_en)
5114                 return 0;
5115
5116         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5117                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5118                 if (!ret)
5119                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5120
5121                 if (ret) {
5122                         dev_warn(&hdev->pdev->dev,
5123                                  "Restore rule %d failed, remove it\n",
5124                                  rule->location);
5125                         hlist_del(&rule->rule_node);
5126                         kfree(rule);
5127                         hdev->hclge_fd_rule_num--;
5128                 }
5129         }
5130         return 0;
5131 }
5132
5133 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5134                                  struct ethtool_rxnfc *cmd)
5135 {
5136         struct hclge_vport *vport = hclge_get_vport(handle);
5137         struct hclge_dev *hdev = vport->back;
5138
5139         if (!hnae3_dev_fd_supported(hdev))
5140                 return -EOPNOTSUPP;
5141
5142         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5143         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5144
5145         return 0;
5146 }
5147
5148 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5149                                   struct ethtool_rxnfc *cmd)
5150 {
5151         struct hclge_vport *vport = hclge_get_vport(handle);
5152         struct hclge_fd_rule *rule = NULL;
5153         struct hclge_dev *hdev = vport->back;
5154         struct ethtool_rx_flow_spec *fs;
5155         struct hlist_node *node2;
5156
5157         if (!hnae3_dev_fd_supported(hdev))
5158                 return -EOPNOTSUPP;
5159
5160         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5161
5162         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5163                 if (rule->location >= fs->location)
5164                         break;
5165         }
5166
5167         if (!rule || fs->location != rule->location)
5168                 return -ENOENT;
5169
5170         fs->flow_type = rule->flow_type;
5171         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5172         case SCTP_V4_FLOW:
5173         case TCP_V4_FLOW:
5174         case UDP_V4_FLOW:
5175                 fs->h_u.tcp_ip4_spec.ip4src =
5176                                 cpu_to_be32(rule->tuples.src_ip[3]);
5177                 fs->m_u.tcp_ip4_spec.ip4src =
5178                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5179                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5180
5181                 fs->h_u.tcp_ip4_spec.ip4dst =
5182                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5183                 fs->m_u.tcp_ip4_spec.ip4dst =
5184                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5185                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5186
5187                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5188                 fs->m_u.tcp_ip4_spec.psrc =
5189                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5190                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5191
5192                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5193                 fs->m_u.tcp_ip4_spec.pdst =
5194                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5195                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5196
5197                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5198                 fs->m_u.tcp_ip4_spec.tos =
5199                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5200                                 0 : rule->tuples_mask.ip_tos;
5201
5202                 break;
5203         case IP_USER_FLOW:
5204                 fs->h_u.usr_ip4_spec.ip4src =
5205                                 cpu_to_be32(rule->tuples.src_ip[3]);
5206                 fs->m_u.tcp_ip4_spec.ip4src =
5207                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5208                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5209
5210                 fs->h_u.usr_ip4_spec.ip4dst =
5211                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5212                 fs->m_u.usr_ip4_spec.ip4dst =
5213                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5214                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5215
5216                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5217                 fs->m_u.usr_ip4_spec.tos =
5218                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5219                                 0 : rule->tuples_mask.ip_tos;
5220
5221                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5222                 fs->m_u.usr_ip4_spec.proto =
5223                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5224                                 0 : rule->tuples_mask.ip_proto;
5225
5226                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5227
5228                 break;
5229         case SCTP_V6_FLOW:
5230         case TCP_V6_FLOW:
5231         case UDP_V6_FLOW:
5232                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5233                                   rule->tuples.src_ip, 4);
5234                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5235                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5236                 else
5237                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5238                                           rule->tuples_mask.src_ip, 4);
5239
5240                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5241                                   rule->tuples.dst_ip, 4);
5242                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5243                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5244                 else
5245                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5246                                           rule->tuples_mask.dst_ip, 4);
5247
5248                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5249                 fs->m_u.tcp_ip6_spec.psrc =
5250                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5251                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5252
5253                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5254                 fs->m_u.tcp_ip6_spec.pdst =
5255                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5256                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5257
5258                 break;
5259         case IPV6_USER_FLOW:
5260                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5261                                   rule->tuples.src_ip, 4);
5262                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5263                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5264                 else
5265                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5266                                           rule->tuples_mask.src_ip, 4);
5267
5268                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5269                                   rule->tuples.dst_ip, 4);
5270                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5271                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5272                 else
5273                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5274                                           rule->tuples_mask.dst_ip, 4);
5275
5276                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5277                 fs->m_u.usr_ip6_spec.l4_proto =
5278                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5279                                 0 : rule->tuples_mask.ip_proto;
5280
5281                 break;
5282         case ETHER_FLOW:
5283                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5284                                 rule->tuples.src_mac);
5285                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5286                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5287                 else
5288                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5289                                         rule->tuples_mask.src_mac);
5290
5291                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5292                                 rule->tuples.dst_mac);
5293                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5294                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5295                 else
5296                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5297                                         rule->tuples_mask.dst_mac);
5298
5299                 fs->h_u.ether_spec.h_proto =
5300                                 cpu_to_be16(rule->tuples.ether_proto);
5301                 fs->m_u.ether_spec.h_proto =
5302                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5303                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5304
5305                 break;
5306         default:
5307                 return -EOPNOTSUPP;
5308         }
5309
5310         if (fs->flow_type & FLOW_EXT) {
5311                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5312                 fs->m_ext.vlan_tci =
5313                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5314                                 cpu_to_be16(VLAN_VID_MASK) :
5315                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5316         }
5317
5318         if (fs->flow_type & FLOW_MAC_EXT) {
5319                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5320                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5321                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5322                 else
5323                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5324                                         rule->tuples_mask.dst_mac);
5325         }
5326
5327         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5328                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5329         } else {
5330                 u64 vf_id;
5331
5332                 fs->ring_cookie = rule->queue_id;
5333                 vf_id = rule->vf_id;
5334                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5335                 fs->ring_cookie |= vf_id;
5336         }
5337
5338         return 0;
5339 }
5340
5341 static int hclge_get_all_rules(struct hnae3_handle *handle,
5342                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5343 {
5344         struct hclge_vport *vport = hclge_get_vport(handle);
5345         struct hclge_dev *hdev = vport->back;
5346         struct hclge_fd_rule *rule;
5347         struct hlist_node *node2;
5348         int cnt = 0;
5349
5350         if (!hnae3_dev_fd_supported(hdev))
5351                 return -EOPNOTSUPP;
5352
5353         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5354
5355         hlist_for_each_entry_safe(rule, node2,
5356                                   &hdev->fd_rule_list, rule_node) {
5357                 if (cnt == cmd->rule_cnt)
5358                         return -EMSGSIZE;
5359
5360                 rule_locs[cnt] = rule->location;
5361                 cnt++;
5362         }
5363
5364         cmd->rule_cnt = cnt;
5365
5366         return 0;
5367 }
5368
5369 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5370 {
5371         struct hclge_vport *vport = hclge_get_vport(handle);
5372         struct hclge_dev *hdev = vport->back;
5373
5374         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5375                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5376 }
5377
5378 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5379 {
5380         struct hclge_vport *vport = hclge_get_vport(handle);
5381         struct hclge_dev *hdev = vport->back;
5382
5383         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5384 }
5385
5386 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5387 {
5388         struct hclge_vport *vport = hclge_get_vport(handle);
5389         struct hclge_dev *hdev = vport->back;
5390
5391         return hdev->rst_stats.hw_reset_done_cnt;
5392 }
5393
5394 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5395 {
5396         struct hclge_vport *vport = hclge_get_vport(handle);
5397         struct hclge_dev *hdev = vport->back;
5398
5399         hdev->fd_en = enable;
5400         if (!enable)
5401                 hclge_del_all_fd_entries(handle, false);
5402         else
5403                 hclge_restore_fd_entries(handle);
5404 }
5405
5406 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5407 {
5408         struct hclge_desc desc;
5409         struct hclge_config_mac_mode_cmd *req =
5410                 (struct hclge_config_mac_mode_cmd *)desc.data;
5411         u32 loop_en = 0;
5412         int ret;
5413
5414         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5415         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5416         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5417         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5418         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5419         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5420         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5421         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5422         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5423         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5424         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5425         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5426         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5427         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5428         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5429         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5430
5431         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5432         if (ret)
5433                 dev_err(&hdev->pdev->dev,
5434                         "mac enable fail, ret =%d.\n", ret);
5435 }
5436
5437 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5438 {
5439         struct hclge_config_mac_mode_cmd *req;
5440         struct hclge_desc desc;
5441         u32 loop_en;
5442         int ret;
5443
5444         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5445         /* 1 Read out the MAC mode config first */
5446         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5447         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5448         if (ret) {
5449                 dev_err(&hdev->pdev->dev,
5450                         "mac loopback get fail, ret =%d.\n", ret);
5451                 return ret;
5452         }
5453
5454         /* 2 Then set up the loopback flag */
5455         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5456         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5457         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5458         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5459
5460         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5461
5462         /* 3 Config MAC work mode with the loopback flag
5463          * and its original configuration parameters
5464          */
5465         hclge_cmd_reuse_desc(&desc, false);
5466         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5467         if (ret)
5468                 dev_err(&hdev->pdev->dev,
5469                         "mac loopback set fail, ret =%d.\n", ret);
5470         return ret;
5471 }
5472
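     /* Enable or disable serdes loopback through firmware, poll the command
      * result until it completes, then reconfigure the MAC and wait for the
      * link status to match the requested state.
      */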
5473 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5474                                      enum hnae3_loop loop_mode)
5475 {
5476 #define HCLGE_SERDES_RETRY_MS   10
5477 #define HCLGE_SERDES_RETRY_NUM  100
5478
5479 #define HCLGE_MAC_LINK_STATUS_MS   10
5480 #define HCLGE_MAC_LINK_STATUS_NUM  100
5481 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5482 #define HCLGE_MAC_LINK_STATUS_UP   1
5483
5484         struct hclge_serdes_lb_cmd *req;
5485         struct hclge_desc desc;
5486         int mac_link_ret = 0;
5487         int ret, i = 0;
5488         u8 loop_mode_b;
5489
5490         req = (struct hclge_serdes_lb_cmd *)desc.data;
5491         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5492
5493         switch (loop_mode) {
5494         case HNAE3_LOOP_SERIAL_SERDES:
5495                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5496                 break;
5497         case HNAE3_LOOP_PARALLEL_SERDES:
5498                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5499                 break;
5500         default:
5501                 dev_err(&hdev->pdev->dev,
5502                         "unsupported serdes loopback mode %d\n", loop_mode);
5503                 return -ENOTSUPP;
5504         }
5505
5506         if (en) {
5507                 req->enable = loop_mode_b;
5508                 req->mask = loop_mode_b;
5509                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5510         } else {
5511                 req->mask = loop_mode_b;
5512                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5513         }
5514
5515         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5516         if (ret) {
5517                 dev_err(&hdev->pdev->dev,
5518                         "serdes loopback set fail, ret = %d\n", ret);
5519                 return ret;
5520         }
5521
5522         do {
5523                 msleep(HCLGE_SERDES_RETRY_MS);
5524                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5525                                            true);
5526                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5527                 if (ret) {
5528                         dev_err(&hdev->pdev->dev,
5529                                 "serdes loopback get fail, ret = %d\n", ret);
5530                         return ret;
5531                 }
5532         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5533                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5534
5535         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5536                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5537                 return -EBUSY;
5538         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5539                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5540                 return -EIO;
5541         }
5542
5543         hclge_cfg_mac_mode(hdev, en);
5544
5545         i = 0;
5546         do {
5547                 /* serdes internal loopback, independent of the network cable */
5548                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5549                 ret = hclge_get_mac_link_status(hdev);
5550                 if (ret == mac_link_ret)
5551                         return 0;
5552         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5553
5554         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5555
5556         return -EBUSY;
5557 }
5558
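/* Enable or disable a single TQP (task queue pair) for the given stream via
 * the common TQP queue config command.
 */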
5559 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5560                             int stream_id, bool enable)
5561 {
5562         struct hclge_desc desc;
5563         struct hclge_cfg_com_tqp_queue_cmd *req =
5564                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5565         int ret;
5566
5567         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5568         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5569         req->stream_id = cpu_to_le16(stream_id);
5570         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5571
5572         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5573         if (ret)
5574                 dev_err(&hdev->pdev->dev,
5575                         "Tqp enable fail, status =%d.\n", ret);
5576         return ret;
5577 }
5578
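/* Set the requested loopback mode (app or serdes), then enable or disable
 * every TQP belonging to this handle accordingly.
 */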
5579 static int hclge_set_loopback(struct hnae3_handle *handle,
5580                               enum hnae3_loop loop_mode, bool en)
5581 {
5582         struct hclge_vport *vport = hclge_get_vport(handle);
5583         struct hnae3_knic_private_info *kinfo;
5584         struct hclge_dev *hdev = vport->back;
5585         int i, ret;
5586
5587         switch (loop_mode) {
5588         case HNAE3_LOOP_APP:
5589                 ret = hclge_set_app_loopback(hdev, en);
5590                 break;
5591         case HNAE3_LOOP_SERIAL_SERDES:
5592         case HNAE3_LOOP_PARALLEL_SERDES:
5593                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5594                 break;
5595         default:
5596                 ret = -ENOTSUPP;
5597                 dev_err(&hdev->pdev->dev,
5598                         "loop_mode %d is not supported\n", loop_mode);
5599                 break;
5600         }
5601
5602         if (ret)
5603                 return ret;
5604
5605         kinfo = &vport->nic.kinfo;
5606         for (i = 0; i < kinfo->num_tqps; i++) {
5607                 ret = hclge_tqp_enable(hdev, i, 0, en);
5608                 if (ret)
5609                         return ret;
5610         }
5611
5612         return 0;
5613 }
5614
5615 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5616 {
5617         struct hclge_vport *vport = hclge_get_vport(handle);
5618         struct hnae3_knic_private_info *kinfo;
5619         struct hnae3_queue *queue;
5620         struct hclge_tqp *tqp;
5621         int i;
5622
5623         kinfo = &vport->nic.kinfo;
5624         for (i = 0; i < kinfo->num_tqps; i++) {
5625                 queue = handle->kinfo.tqp[i];
5626                 tqp = container_of(queue, struct hclge_tqp, q);
5627                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5628         }
5629 }
5630
5631 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5632 {
5633         struct hclge_vport *vport = hclge_get_vport(handle);
5634         struct hclge_dev *hdev = vport->back;
5635
5636         if (enable) {
5637                 mod_timer(&hdev->service_timer, jiffies + HZ);
5638         } else {
5639                 del_timer_sync(&hdev->service_timer);
5640                 cancel_work_sync(&hdev->service_task);
5641                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5642         }
5643 }
5644
5645 static int hclge_ae_start(struct hnae3_handle *handle)
5646 {
5647         struct hclge_vport *vport = hclge_get_vport(handle);
5648         struct hclge_dev *hdev = vport->back;
5649
5650         /* mac enable */
5651         hclge_cfg_mac_mode(hdev, true);
5652         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5653         hdev->hw.mac.link = 0;
5654
5655         /* reset tqp stats */
5656         hclge_reset_tqp_stats(handle);
5657
5658         hclge_mac_start_phy(hdev);
5659
5660         return 0;
5661 }
5662
5663 static void hclge_ae_stop(struct hnae3_handle *handle)
5664 {
5665         struct hclge_vport *vport = hclge_get_vport(handle);
5666         struct hclge_dev *hdev = vport->back;
5667         int i;
5668
5669         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5670
5671         /* If it is not a PF reset, the firmware will disable the MAC,
5672          * so we only need to stop the PHY here.
5673          */
5674         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5675             hdev->reset_type != HNAE3_FUNC_RESET) {
5676                 hclge_mac_stop_phy(hdev);
5677                 return;
5678         }
5679
5680         for (i = 0; i < handle->kinfo.num_tqps; i++)
5681                 hclge_reset_tqp(handle, i);
5682
5683         /* mac disable */
5684         hclge_cfg_mac_mode(hdev, false);
5685
5686         hclge_mac_stop_phy(hdev);
5687
5688         /* reset tqp stats */
5689         hclge_reset_tqp_stats(handle);
5690         hclge_update_link_status(hdev);
5691 }
5692
5693 int hclge_vport_start(struct hclge_vport *vport)
5694 {
5695         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5696         vport->last_active_jiffies = jiffies;
5697         return 0;
5698 }
5699
5700 void hclge_vport_stop(struct hclge_vport *vport)
5701 {
5702         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5703 }
5704
5705 static int hclge_client_start(struct hnae3_handle *handle)
5706 {
5707         struct hclge_vport *vport = hclge_get_vport(handle);
5708
5709         return hclge_vport_start(vport);
5710 }
5711
5712 static void hclge_client_stop(struct hnae3_handle *handle)
5713 {
5714         struct hclge_vport *vport = hclge_get_vport(handle);
5715
5716         hclge_vport_stop(vport);
5717 }
5718
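/* Translate the MAC_VLAN table command response code into an errno,
 * depending on whether the operation was an add, remove or lookup.
 */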
5719 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5720                                          u16 cmdq_resp, u8  resp_code,
5721                                          enum hclge_mac_vlan_tbl_opcode op)
5722 {
5723         struct hclge_dev *hdev = vport->back;
5724         int return_status = -EIO;
5725
5726         if (cmdq_resp) {
5727                 dev_err(&hdev->pdev->dev,
5728                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5729                         cmdq_resp);
5730                 return -EIO;
5731         }
5732
5733         if (op == HCLGE_MAC_VLAN_ADD) {
5734                 if (!resp_code || resp_code == 1) {
5735                         return_status = 0;
5736                 } else if (resp_code == 2) {
5737                         return_status = -ENOSPC;
5738                         dev_err(&hdev->pdev->dev,
5739                                 "add mac addr failed for uc_overflow.\n");
5740                 } else if (resp_code == 3) {
5741                         return_status = -ENOSPC;
5742                         dev_err(&hdev->pdev->dev,
5743                                 "add mac addr failed for mc_overflow.\n");
5744                 } else {
5745                         dev_err(&hdev->pdev->dev,
5746                                 "add mac addr failed for undefined, code=%d.\n",
5747                                 resp_code);
5748                 }
5749         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5750                 if (!resp_code) {
5751                         return_status = 0;
5752                 } else if (resp_code == 1) {
5753                         return_status = -ENOENT;
5754                         dev_dbg(&hdev->pdev->dev,
5755                                 "remove mac addr failed for miss.\n");
5756                 } else {
5757                         dev_err(&hdev->pdev->dev,
5758                                 "remove mac addr failed for undefined, code=%d.\n",
5759                                 resp_code);
5760                 }
5761         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5762                 if (!resp_code) {
5763                         return_status = 0;
5764                 } else if (resp_code == 1) {
5765                         return_status = -ENOENT;
5766                         dev_dbg(&hdev->pdev->dev,
5767                                 "lookup mac addr failed for miss.\n");
5768                 } else {
5769                         dev_err(&hdev->pdev->dev,
5770                                 "lookup mac addr failed for undefined, code=%d.\n",
5771                                 resp_code);
5772                 }
5773         } else {
5774                 return_status = -EINVAL;
5775                 dev_err(&hdev->pdev->dev,
5776                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5777                         op);
5778         }
5779
5780         return return_status;
5781 }
5782
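/* Set or clear the bit for @vfid in the VF bitmap carried by the MAC_VLAN
 * table descriptors: vfid 0-191 lives in desc[1], vfid 192-255 in desc[2].
 */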
5783 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5784 {
5785         int word_num;
5786         int bit_num;
5787
5788         if (vfid > 255 || vfid < 0)
5789                 return -EIO;
5790
5791         if (vfid >= 0 && vfid <= 191) {
5792                 word_num = vfid / 32;
5793                 bit_num  = vfid % 32;
5794                 if (clr)
5795                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5796                 else
5797                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5798         } else {
5799                 word_num = (vfid - 192) / 32;
5800                 bit_num  = vfid % 32;
5801                 if (clr)
5802                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5803                 else
5804                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5805         }
5806
5807         return 0;
5808 }
5809
5810 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5811 {
5812 #define HCLGE_DESC_NUMBER 3
5813 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5814         int i, j;
5815
5816         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5817                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5818                         if (desc[i].data[j])
5819                                 return false;
5820
5821         return true;
5822 }
5823
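/* Fill a MAC_VLAN table entry with the given address: bytes 0-3 are packed
 * into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; multicast entries
 * additionally get the MC entry type and enable bits set.
 */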
5824 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5825                                    const u8 *addr, bool is_mc)
5826 {
5827         const unsigned char *mac_addr = addr;
5828         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5829                        (mac_addr[0]) | (mac_addr[1] << 8);
5830         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5831
5832         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5833         if (is_mc) {
5834                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5835                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5836         }
5837
5838         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5839         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5840 }
5841
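/* Remove one entry from the MAC_VLAN table and convert the command response
 * into an errno via hclge_get_mac_vlan_cmd_status().
 */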
5842 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5843                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5844 {
5845         struct hclge_dev *hdev = vport->back;
5846         struct hclge_desc desc;
5847         u8 resp_code;
5848         u16 retval;
5849         int ret;
5850
5851         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5852
5853         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5854
5855         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5856         if (ret) {
5857                 dev_err(&hdev->pdev->dev,
5858                         "del mac addr failed for cmd_send, ret =%d.\n",
5859                         ret);
5860                 return ret;
5861         }
5862         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5863         retval = le16_to_cpu(desc.retval);
5864
5865         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5866                                              HCLGE_MAC_VLAN_REMOVE);
5867 }
5868
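/* Look up a MAC address in the MAC_VLAN table; multicast lookups chain three
 * descriptors so the full VF bitmap is returned along with the entry.
 */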
5869 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5870                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5871                                      struct hclge_desc *desc,
5872                                      bool is_mc)
5873 {
5874         struct hclge_dev *hdev = vport->back;
5875         u8 resp_code;
5876         u16 retval;
5877         int ret;
5878
5879         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5880         if (is_mc) {
5881                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5882                 memcpy(desc[0].data,
5883                        req,
5884                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5885                 hclge_cmd_setup_basic_desc(&desc[1],
5886                                            HCLGE_OPC_MAC_VLAN_ADD,
5887                                            true);
5888                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5889                 hclge_cmd_setup_basic_desc(&desc[2],
5890                                            HCLGE_OPC_MAC_VLAN_ADD,
5891                                            true);
5892                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5893         } else {
5894                 memcpy(desc[0].data,
5895                        req,
5896                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5897                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5898         }
5899         if (ret) {
5900                 dev_err(&hdev->pdev->dev,
5901                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5902                         ret);
5903                 return ret;
5904         }
5905         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5906         retval = le16_to_cpu(desc[0].retval);
5907
5908         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5909                                              HCLGE_MAC_VLAN_LKUP);
5910 }
5911
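/* Add an entry to the MAC_VLAN table: unicast entries use a single
 * descriptor, while multicast entries reuse the three lookup descriptors so
 * the existing VF bitmap is preserved.
 */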
5912 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5913                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5914                                   struct hclge_desc *mc_desc)
5915 {
5916         struct hclge_dev *hdev = vport->back;
5917         int cfg_status;
5918         u8 resp_code;
5919         u16 retval;
5920         int ret;
5921
5922         if (!mc_desc) {
5923                 struct hclge_desc desc;
5924
5925                 hclge_cmd_setup_basic_desc(&desc,
5926                                            HCLGE_OPC_MAC_VLAN_ADD,
5927                                            false);
5928                 memcpy(desc.data, req,
5929                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5930                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5931                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5932                 retval = le16_to_cpu(desc.retval);
5933
5934                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5935                                                            resp_code,
5936                                                            HCLGE_MAC_VLAN_ADD);
5937         } else {
5938                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5939                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5940                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5941                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5942                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5943                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5944                 memcpy(mc_desc[0].data, req,
5945                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5946                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5947                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5948                 retval = le16_to_cpu(mc_desc[0].retval);
5949
5950                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5951                                                            resp_code,
5952                                                            HCLGE_MAC_VLAN_ADD);
5953         }
5954
5955         if (ret) {
5956                 dev_err(&hdev->pdev->dev,
5957                         "add mac addr failed for cmd_send, ret =%d.\n",
5958                         ret);
5959                 return ret;
5960         }
5961
5962         return cfg_status;
5963 }
5964
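/* Request the wanted unicast MAC VLAN (UMV) space from firmware and derive
 * the per-function quota (priv_umv_size) and the shared pool
 * (share_umv_size) from the space actually allocated.
 */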
5965 static int hclge_init_umv_space(struct hclge_dev *hdev)
5966 {
5967         u16 allocated_size = 0;
5968         int ret;
5969
5970         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5971                                   true);
5972         if (ret)
5973                 return ret;
5974
5975         if (allocated_size < hdev->wanted_umv_size)
5976                 dev_warn(&hdev->pdev->dev,
5977                          "Alloc umv space failed, want %d, get %d\n",
5978                          hdev->wanted_umv_size, allocated_size);
5979
5980         mutex_init(&hdev->umv_mutex);
5981         hdev->max_umv_size = allocated_size;
5982         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5983         hdev->share_umv_size = hdev->priv_umv_size +
5984                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5985
5986         return 0;
5987 }
5988
5989 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5990 {
5991         int ret;
5992
5993         if (hdev->max_umv_size > 0) {
5994                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5995                                           false);
5996                 if (ret)
5997                         return ret;
5998                 hdev->max_umv_size = 0;
5999         }
6000         mutex_destroy(&hdev->umv_mutex);
6001
6002         return 0;
6003 }
6004
6005 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6006                                u16 *allocated_size, bool is_alloc)
6007 {
6008         struct hclge_umv_spc_alc_cmd *req;
6009         struct hclge_desc desc;
6010         int ret;
6011
6012         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6013         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6014         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6015         req->space_size = cpu_to_le32(space_size);
6016
6017         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6018         if (ret) {
6019                 dev_err(&hdev->pdev->dev,
6020                         "%s umv space failed for cmd_send, ret =%d\n",
6021                         is_alloc ? "allocate" : "free", ret);
6022                 return ret;
6023         }
6024
6025         if (is_alloc && allocated_size)
6026                 *allocated_size = le32_to_cpu(desc.data[1]);
6027
6028         return 0;
6029 }
6030
6031 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6032 {
6033         struct hclge_vport *vport;
6034         int i;
6035
6036         for (i = 0; i < hdev->num_alloc_vport; i++) {
6037                 vport = &hdev->vport[i];
6038                 vport->used_umv_num = 0;
6039         }
6040
6041         mutex_lock(&hdev->umv_mutex);
6042         hdev->share_umv_size = hdev->priv_umv_size +
6043                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6044         mutex_unlock(&hdev->umv_mutex);
6045 }
6046
6047 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6048 {
6049         struct hclge_dev *hdev = vport->back;
6050         bool is_full;
6051
6052         mutex_lock(&hdev->umv_mutex);
6053         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6054                    hdev->share_umv_size == 0);
6055         mutex_unlock(&hdev->umv_mutex);
6056
6057         return is_full;
6058 }
6059
6060 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6061 {
6062         struct hclge_dev *hdev = vport->back;
6063
6064         mutex_lock(&hdev->umv_mutex);
6065         if (is_free) {
6066                 if (vport->used_umv_num > hdev->priv_umv_size)
6067                         hdev->share_umv_size++;
6068
6069                 if (vport->used_umv_num > 0)
6070                         vport->used_umv_num--;
6071         } else {
6072                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6073                     hdev->share_umv_size > 0)
6074                         hdev->share_umv_size--;
6075                 vport->used_umv_num++;
6076         }
6077         mutex_unlock(&hdev->umv_mutex);
6078 }
6079
6080 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6081                              const unsigned char *addr)
6082 {
6083         struct hclge_vport *vport = hclge_get_vport(handle);
6084
6085         return hclge_add_uc_addr_common(vport, addr);
6086 }
6087
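/* Add a unicast MAC address for this vport: reject invalid addresses, then
 * insert the entry only if it is not already present and there is still
 * private or shared UMV space available.
 */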
6088 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6089                              const unsigned char *addr)
6090 {
6091         struct hclge_dev *hdev = vport->back;
6092         struct hclge_mac_vlan_tbl_entry_cmd req;
6093         struct hclge_desc desc;
6094         u16 egress_port = 0;
6095         int ret;
6096
6097         /* mac addr check */
6098         if (is_zero_ether_addr(addr) ||
6099             is_broadcast_ether_addr(addr) ||
6100             is_multicast_ether_addr(addr)) {
6101                 dev_err(&hdev->pdev->dev,
6102                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6103                          addr,
6104                          is_zero_ether_addr(addr),
6105                          is_broadcast_ether_addr(addr),
6106                          is_multicast_ether_addr(addr));
6107                 return -EINVAL;
6108         }
6109
6110         memset(&req, 0, sizeof(req));
6111
6112         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6113                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6114
6115         req.egress_port = cpu_to_le16(egress_port);
6116
6117         hclge_prepare_mac_addr(&req, addr, false);
6118
6119         /* Look up the mac address in the mac_vlan table, and add
6120          * it if the entry does not exist. Duplicate unicast entries
6121          * are not allowed in the mac vlan table.
6122          */
6123         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6124         if (ret == -ENOENT) {
6125                 if (!hclge_is_umv_space_full(vport)) {
6126                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6127                         if (!ret)
6128                                 hclge_update_umv_space(vport, false);
6129                         return ret;
6130                 }
6131
6132                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6133                         hdev->priv_umv_size);
6134
6135                 return -ENOSPC;
6136         }
6137
6138         /* check if we just hit the duplicate */
6139         if (!ret) {
6140                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6141                          vport->vport_id, addr);
6142                 return 0;
6143         }
6144
6145         dev_err(&hdev->pdev->dev,
6146                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6147                 addr);
6148
6149         return ret;
6150 }
6151
6152 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6153                             const unsigned char *addr)
6154 {
6155         struct hclge_vport *vport = hclge_get_vport(handle);
6156
6157         return hclge_rm_uc_addr_common(vport, addr);
6158 }
6159
6160 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6161                             const unsigned char *addr)
6162 {
6163         struct hclge_dev *hdev = vport->back;
6164         struct hclge_mac_vlan_tbl_entry_cmd req;
6165         int ret;
6166
6167         /* mac addr check */
6168         if (is_zero_ether_addr(addr) ||
6169             is_broadcast_ether_addr(addr) ||
6170             is_multicast_ether_addr(addr)) {
6171                 dev_dbg(&hdev->pdev->dev,
6172                         "Remove mac err! invalid mac:%pM.\n",
6173                          addr);
6174                 return -EINVAL;
6175         }
6176
6177         memset(&req, 0, sizeof(req));
6178         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6179         hclge_prepare_mac_addr(&req, addr, false);
6180         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6181         if (!ret)
6182                 hclge_update_umv_space(vport, true);
6183
6184         return ret;
6185 }
6186
6187 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6188                              const unsigned char *addr)
6189 {
6190         struct hclge_vport *vport = hclge_get_vport(handle);
6191
6192         return hclge_add_mc_addr_common(vport, addr);
6193 }
6194
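/* Add a multicast MAC address for this vport: if the entry already exists,
 * just add this vport's VFID to it; otherwise create a new entry carrying
 * this VFID.
 */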
6195 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6196                              const unsigned char *addr)
6197 {
6198         struct hclge_dev *hdev = vport->back;
6199         struct hclge_mac_vlan_tbl_entry_cmd req;
6200         struct hclge_desc desc[3];
6201         int status;
6202
6203         /* mac addr check */
6204         if (!is_multicast_ether_addr(addr)) {
6205                 dev_err(&hdev->pdev->dev,
6206                         "Add mc mac err! invalid mac:%pM.\n",
6207                          addr);
6208                 return -EINVAL;
6209         }
6210         memset(&req, 0, sizeof(req));
6211         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6212         hclge_prepare_mac_addr(&req, addr, true);
6213         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6214         if (!status) {
6215                 /* This mac addr exists, update the VFID for it */
6216                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6217                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6218         } else {
6219                 /* This mac addr does not exist, add a new entry for it */
6220                 memset(desc[0].data, 0, sizeof(desc[0].data));
6221                 memset(desc[1].data, 0, sizeof(desc[1].data));
6222                 memset(desc[2].data, 0, sizeof(desc[2].data));
6223                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6224                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6225         }
6226
6227         if (status == -ENOSPC)
6228                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6229
6230         return status;
6231 }
6232
6233 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6234                             const unsigned char *addr)
6235 {
6236         struct hclge_vport *vport = hclge_get_vport(handle);
6237
6238         return hclge_rm_mc_addr_common(vport, addr);
6239 }
6240
6241 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6242                             const unsigned char *addr)
6243 {
6244         struct hclge_dev *hdev = vport->back;
6245         struct hclge_mac_vlan_tbl_entry_cmd req;
6246         enum hclge_cmd_status status;
6247         struct hclge_desc desc[3];
6248
6249         /* mac addr check */
6250         if (!is_multicast_ether_addr(addr)) {
6251                 dev_dbg(&hdev->pdev->dev,
6252                         "Remove mc mac err! invalid mac:%pM.\n",
6253                          addr);
6254                 return -EINVAL;
6255         }
6256
6257         memset(&req, 0, sizeof(req));
6258         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6259         hclge_prepare_mac_addr(&req, addr, true);
6260         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6261         if (!status) {
6262                 /* This mac addr exists, remove this handle's VFID for it */
6263                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6264
6265                 if (hclge_is_all_function_id_zero(desc))
6266                         /* All the vfids are zero, so delete this entry */
6267                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6268                 else
6269                         /* Not all the vfids are zero, so update the vfid */
6270                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6271
6272         } else {
6273                 /* Maybe this mac address is in the mta table, but it cannot
6274                  * be deleted here because an mta entry represents an address
6275                  * range rather than a specific address. The delete action for
6276                  * all entries will take effect in update_mta_status called by
6277                  * hns3_nic_set_rx_mode.
6278                  */
6279                 status = 0;
6280         }
6281
6282         return status;
6283 }
6284
6285 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6286                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6287 {
6288         struct hclge_vport_mac_addr_cfg *mac_cfg;
6289         struct list_head *list;
6290
6291         if (!vport->vport_id)
6292                 return;
6293
6294         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6295         if (!mac_cfg)
6296                 return;
6297
6298         mac_cfg->hd_tbl_status = true;
6299         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6300
6301         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6302                &vport->uc_mac_list : &vport->mc_mac_list;
6303
6304         list_add_tail(&mac_cfg->node, list);
6305 }
6306
6307 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6308                               bool is_write_tbl,
6309                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6310 {
6311         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6312         struct list_head *list;
6313         bool uc_flag, mc_flag;
6314
6315         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6316                &vport->uc_mac_list : &vport->mc_mac_list;
6317
6318         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6319         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6320
6321         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6322                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6323                         if (uc_flag && mac_cfg->hd_tbl_status)
6324                                 hclge_rm_uc_addr_common(vport, mac_addr);
6325
6326                         if (mc_flag && mac_cfg->hd_tbl_status)
6327                                 hclge_rm_mc_addr_common(vport, mac_addr);
6328
6329                         list_del(&mac_cfg->node);
6330                         kfree(mac_cfg);
6331                         break;
6332                 }
6333         }
6334 }
6335
6336 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6337                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6338 {
6339         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6340         struct list_head *list;
6341
6342         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6343                &vport->uc_mac_list : &vport->mc_mac_list;
6344
6345         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6346                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6347                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6348
6349                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6350                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6351
6352                 mac_cfg->hd_tbl_status = false;
6353                 if (is_del_list) {
6354                         list_del(&mac_cfg->node);
6355                         kfree(mac_cfg);
6356                 }
6357         }
6358 }
6359
6360 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6361 {
6362         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6363         struct hclge_vport *vport;
6364         int i;
6365
6366         mutex_lock(&hdev->vport_cfg_mutex);
6367         for (i = 0; i < hdev->num_alloc_vport; i++) {
6368                 vport = &hdev->vport[i];
6369                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6370                         list_del(&mac->node);
6371                         kfree(mac);
6372                 }
6373
6374                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6375                         list_del(&mac->node);
6376                         kfree(mac);
6377                 }
6378         }
6379         mutex_unlock(&hdev->vport_cfg_mutex);
6380 }
6381
6382 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6383                                               u16 cmdq_resp, u8 resp_code)
6384 {
6385 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6386 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6387 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6388 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6389
6390         int return_status;
6391
6392         if (cmdq_resp) {
6393                 dev_err(&hdev->pdev->dev,
6394                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6395                         cmdq_resp);
6396                 return -EIO;
6397         }
6398
6399         switch (resp_code) {
6400         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6401         case HCLGE_ETHERTYPE_ALREADY_ADD:
6402                 return_status = 0;
6403                 break;
6404         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6405                 dev_err(&hdev->pdev->dev,
6406                         "add mac ethertype failed for manager table overflow.\n");
6407                 return_status = -EIO;
6408                 break;
6409         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6410                 dev_err(&hdev->pdev->dev,
6411                         "add mac ethertype failed for key conflict.\n");
6412                 return_status = -EIO;
6413                 break;
6414         default:
6415                 dev_err(&hdev->pdev->dev,
6416                         "add mac ethertype failed for undefined, code=%d.\n",
6417                         resp_code);
6418                 return_status = -EIO;
6419         }
6420
6421         return return_status;
6422 }
6423
6424 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6425                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6426 {
6427         struct hclge_desc desc;
6428         u8 resp_code;
6429         u16 retval;
6430         int ret;
6431
6432         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6433         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6434
6435         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6436         if (ret) {
6437                 dev_err(&hdev->pdev->dev,
6438                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6439                         ret);
6440                 return ret;
6441         }
6442
6443         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6444         retval = le16_to_cpu(desc.retval);
6445
6446         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6447 }
6448
6449 static int init_mgr_tbl(struct hclge_dev *hdev)
6450 {
6451         int ret;
6452         int i;
6453
6454         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6455                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6456                 if (ret) {
6457                         dev_err(&hdev->pdev->dev,
6458                                 "add mac ethertype failed, ret =%d.\n",
6459                                 ret);
6460                         return ret;
6461                 }
6462         }
6463
6464         return 0;
6465 }
6466
6467 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6468 {
6469         struct hclge_vport *vport = hclge_get_vport(handle);
6470         struct hclge_dev *hdev = vport->back;
6471
6472         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6473 }
6474
6475 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6476                               bool is_first)
6477 {
6478         const unsigned char *new_addr = (const unsigned char *)p;
6479         struct hclge_vport *vport = hclge_get_vport(handle);
6480         struct hclge_dev *hdev = vport->back;
6481         int ret;
6482
6483         /* mac addr check */
6484         if (is_zero_ether_addr(new_addr) ||
6485             is_broadcast_ether_addr(new_addr) ||
6486             is_multicast_ether_addr(new_addr)) {
6487                 dev_err(&hdev->pdev->dev,
6488                         "Change uc mac err! invalid mac:%pM.\n",
6489                          new_addr);
6490                 return -EINVAL;
6491         }
6492
6493         if ((!is_first || is_kdump_kernel()) &&
6494             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6495                 dev_warn(&hdev->pdev->dev,
6496                          "remove old uc mac address fail.\n");
6497
6498         ret = hclge_add_uc_addr(handle, new_addr);
6499         if (ret) {
6500                 dev_err(&hdev->pdev->dev,
6501                         "add uc mac address fail, ret =%d.\n",
6502                         ret);
6503
6504                 if (!is_first &&
6505                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6506                         dev_err(&hdev->pdev->dev,
6507                                 "restore uc mac address fail.\n");
6508
6509                 return -EIO;
6510         }
6511
6512         ret = hclge_pause_addr_cfg(hdev, new_addr);
6513         if (ret) {
6514                 dev_err(&hdev->pdev->dev,
6515                         "configure mac pause address fail, ret =%d.\n",
6516                         ret);
6517                 return -EIO;
6518         }
6519
6520         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6521
6522         return 0;
6523 }
6524
6525 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6526                           int cmd)
6527 {
6528         struct hclge_vport *vport = hclge_get_vport(handle);
6529         struct hclge_dev *hdev = vport->back;
6530
6531         if (!hdev->hw.mac.phydev)
6532                 return -EOPNOTSUPP;
6533
6534         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6535 }
6536
6537 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6538                                       u8 fe_type, bool filter_en, u8 vf_id)
6539 {
6540         struct hclge_vlan_filter_ctrl_cmd *req;
6541         struct hclge_desc desc;
6542         int ret;
6543
6544         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6545
6546         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6547         req->vlan_type = vlan_type;
6548         req->vlan_fe = filter_en ? fe_type : 0;
6549         req->vf_id = vf_id;
6550
6551         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6552         if (ret)
6553                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6554                         ret);
6555
6556         return ret;
6557 }
6558
6559 #define HCLGE_FILTER_TYPE_VF            0
6560 #define HCLGE_FILTER_TYPE_PORT          1
6561 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6562 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6563 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6564 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6565 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6566 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6567                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6568 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6569                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6570
6571 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6572 {
6573         struct hclge_vport *vport = hclge_get_vport(handle);
6574         struct hclge_dev *hdev = vport->back;
6575
6576         if (hdev->pdev->revision >= 0x21) {
6577                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6578                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
6579                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6580                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
6581         } else {
6582                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6583                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6584                                            0);
6585         }
6586         if (enable)
6587                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6588         else
6589                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6590 }
6591
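/* Program the per-VF VLAN filter: build the two-descriptor VF bitmap command
 * and interpret the response code for both the add and the kill (delete)
 * case.
 */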
6592 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6593                                     bool is_kill, u16 vlan, u8 qos,
6594                                     __be16 proto)
6595 {
6596 #define HCLGE_MAX_VF_BYTES  16
6597         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6598         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6599         struct hclge_desc desc[2];
6600         u8 vf_byte_val;
6601         u8 vf_byte_off;
6602         int ret;
6603
6604         hclge_cmd_setup_basic_desc(&desc[0],
6605                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6606         hclge_cmd_setup_basic_desc(&desc[1],
6607                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6608
6609         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6610
6611         vf_byte_off = vfid / 8;
6612         vf_byte_val = 1 << (vfid % 8);
6613
6614         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6615         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6616
6617         req0->vlan_id  = cpu_to_le16(vlan);
6618         req0->vlan_cfg = is_kill;
6619
6620         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6621                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6622         else
6623                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6624
6625         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6626         if (ret) {
6627                 dev_err(&hdev->pdev->dev,
6628                         "Send vf vlan command fail, ret =%d.\n",
6629                         ret);
6630                 return ret;
6631         }
6632
6633         if (!is_kill) {
6634 #define HCLGE_VF_VLAN_NO_ENTRY  2
6635                 if (!req0->resp_code || req0->resp_code == 1)
6636                         return 0;
6637
6638                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6639                         dev_warn(&hdev->pdev->dev,
6640                                  "vf vlan table is full, vf vlan filter is disabled\n");
6641                         return 0;
6642                 }
6643
6644                 dev_err(&hdev->pdev->dev,
6645                         "Add vf vlan filter fail, ret =%d.\n",
6646                         req0->resp_code);
6647         } else {
6648 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6649                 if (!req0->resp_code)
6650                         return 0;
6651
6652                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6653                         dev_warn(&hdev->pdev->dev,
6654                                  "vlan %d filter is not in vf vlan table\n",
6655                                  vlan);
6656                         return 0;
6657                 }
6658
6659                 dev_err(&hdev->pdev->dev,
6660                         "Kill vf vlan filter fail, ret =%d.\n",
6661                         req0->resp_code);
6662         }
6663
6664         return -EIO;
6665 }
6666
6667 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6668                                       u16 vlan_id, bool is_kill)
6669 {
6670         struct hclge_vlan_filter_pf_cfg_cmd *req;
6671         struct hclge_desc desc;
6672         u8 vlan_offset_byte_val;
6673         u8 vlan_offset_byte;
6674         u8 vlan_offset_160;
6675         int ret;
6676
6677         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6678
6679         vlan_offset_160 = vlan_id / 160;
6680         vlan_offset_byte = (vlan_id % 160) / 8;
6681         vlan_offset_byte_val = 1 << (vlan_id % 8);
6682
6683         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6684         req->vlan_offset = vlan_offset_160;
6685         req->vlan_cfg = is_kill;
6686         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6687
6688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6689         if (ret)
6690                 dev_err(&hdev->pdev->dev,
6691                         "port vlan command, send fail, ret =%d.\n", ret);
6692         return ret;
6693 }
6694
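/* Apply a VLAN filter change for one vport and, when this is the first vport
 * to join or the last one to leave the VLAN, update the port level filter as
 * well.
 */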
6695 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6696                                     u16 vport_id, u16 vlan_id, u8 qos,
6697                                     bool is_kill)
6698 {
6699         u16 vport_idx, vport_num = 0;
6700         int ret;
6701
6702         if (is_kill && !vlan_id)
6703                 return 0;
6704
6705         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6706                                        0, proto);
6707         if (ret) {
6708                 dev_err(&hdev->pdev->dev,
6709                         "Set %d vport vlan filter config fail, ret =%d.\n",
6710                         vport_id, ret);
6711                 return ret;
6712         }
6713
6714         /* vlan 0 may be added twice when 8021q module is enabled */
6715         if (!is_kill && !vlan_id &&
6716             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6717                 return 0;
6718
6719         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6720                 dev_err(&hdev->pdev->dev,
6721                         "Add port vlan failed, vport %d is already in vlan %d\n",
6722                         vport_id, vlan_id);
6723                 return -EINVAL;
6724         }
6725
6726         if (is_kill &&
6727             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6728                 dev_err(&hdev->pdev->dev,
6729                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6730                         vport_id, vlan_id);
6731                 return -EINVAL;
6732         }
6733
6734         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6735                 vport_num++;
6736
6737         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6738                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6739                                                  is_kill);
6740
6741         return ret;
6742 }
6743
6744 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6745 {
6746         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6747         struct hclge_vport_vtag_tx_cfg_cmd *req;
6748         struct hclge_dev *hdev = vport->back;
6749         struct hclge_desc desc;
6750         int status;
6751
6752         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6753
6754         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6755         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6756         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6757         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6758                       vcfg->accept_tag1 ? 1 : 0);
6759         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6760                       vcfg->accept_untag1 ? 1 : 0);
6761         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6762                       vcfg->accept_tag2 ? 1 : 0);
6763         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6764                       vcfg->accept_untag2 ? 1 : 0);
6765         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6766                       vcfg->insert_tag1_en ? 1 : 0);
6767         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6768                       vcfg->insert_tag2_en ? 1 : 0);
6769         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6770
6771         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6772         req->vf_bitmap[req->vf_offset] =
6773                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6774
6775         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6776         if (status)
6777                 dev_err(&hdev->pdev->dev,
6778                         "Send port txvlan cfg command fail, ret =%d\n",
6779                         status);
6780
6781         return status;
6782 }
6783
6784 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6785 {
6786         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6787         struct hclge_vport_vtag_rx_cfg_cmd *req;
6788         struct hclge_dev *hdev = vport->back;
6789         struct hclge_desc desc;
6790         int status;
6791
6792         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6793
6794         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6795         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6796                       vcfg->strip_tag1_en ? 1 : 0);
6797         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6798                       vcfg->strip_tag2_en ? 1 : 0);
6799         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6800                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6801         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6802                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6803
6804         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6805         req->vf_bitmap[req->vf_offset] =
6806                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6807
6808         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6809         if (status)
6810                 dev_err(&hdev->pdev->dev,
6811                         "Send port rxvlan cfg command fail, ret =%d\n",
6812                         status);
6813
6814         return status;
6815 }
6816
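/* Configure TX VLAN insertion and RX VLAN stripping for a vport based on its
 * port-based VLAN state: with a port VLAN active, the port VLAN tag is
 * inserted as tag1 on TX and the stripping roles of tag1/tag2 are swapped on
 * RX.
 */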
6817 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6818                                   u16 port_base_vlan_state,
6819                                   u16 vlan_tag)
6820 {
6821         int ret;
6822
6823         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6824                 vport->txvlan_cfg.accept_tag1 = true;
6825                 vport->txvlan_cfg.insert_tag1_en = false;
6826                 vport->txvlan_cfg.default_tag1 = 0;
6827         } else {
6828                 vport->txvlan_cfg.accept_tag1 = false;
6829                 vport->txvlan_cfg.insert_tag1_en = true;
6830                 vport->txvlan_cfg.default_tag1 = vlan_tag;
6831         }
6832
6833         vport->txvlan_cfg.accept_untag1 = true;
6834
6835         /* accept_tag2 and accept_untag2 are not supported on
6836          * pdev revision(0x20); newer revisions support them, but
6837          * these two fields cannot be configured by the user.
6838          */
6839         vport->txvlan_cfg.accept_tag2 = true;
6840         vport->txvlan_cfg.accept_untag2 = true;
6841         vport->txvlan_cfg.insert_tag2_en = false;
6842         vport->txvlan_cfg.default_tag2 = 0;
6843
6844         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6845                 vport->rxvlan_cfg.strip_tag1_en = false;
6846                 vport->rxvlan_cfg.strip_tag2_en =
6847                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6848         } else {
6849                 vport->rxvlan_cfg.strip_tag1_en =
6850                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6851                 vport->rxvlan_cfg.strip_tag2_en = true;
6852         }
6853         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6854         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6855
6856         ret = hclge_set_vlan_tx_offload_cfg(vport);
6857         if (ret)
6858                 return ret;
6859
6860         return hclge_set_vlan_rx_offload_cfg(vport);
6861 }
6862
6863 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6864 {
6865         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6866         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6867         struct hclge_desc desc;
6868         int status;
6869
6870         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6871         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6872         rx_req->ot_fst_vlan_type =
6873                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6874         rx_req->ot_sec_vlan_type =
6875                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6876         rx_req->in_fst_vlan_type =
6877                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6878         rx_req->in_sec_vlan_type =
6879                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6880
6881         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6882         if (status) {
6883                 dev_err(&hdev->pdev->dev,
6884                         "Send rxvlan protocol type command fail, ret =%d\n",
6885                         status);
6886                 return status;
6887         }
6888
6889         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6890
6891         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6892         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6893         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6894
6895         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6896         if (status)
6897                 dev_err(&hdev->pdev->dev,
6898                         "Send txvlan protocol type command fail, ret =%d\n",
6899                         status);
6900
6901         return status;
6902 }
6903
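/* Initial VLAN setup: enable the default VLAN filters for each vport
 * (per-function on revision 0x21 and later), set the default VLAN protocol
 * types and apply each vport's port-based VLAN offload configuration.
 */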
6904 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6905 {
6906 #define HCLGE_DEF_VLAN_TYPE             0x8100
6907
6908         struct hnae3_handle *handle = &hdev->vport[0].nic;
6909         struct hclge_vport *vport;
6910         int ret;
6911         int i;
6912
6913         if (hdev->pdev->revision >= 0x21) {
6914                 /* for revision 0x21, vf vlan filter is per function */
6915                 for (i = 0; i < hdev->num_alloc_vport; i++) {
6916                         vport = &hdev->vport[i];
6917                         ret = hclge_set_vlan_filter_ctrl(hdev,
6918                                                          HCLGE_FILTER_TYPE_VF,
6919                                                          HCLGE_FILTER_FE_EGRESS,
6920                                                          true,
6921                                                          vport->vport_id);
6922                         if (ret)
6923                                 return ret;
6924                 }
6925
6926                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6927                                                  HCLGE_FILTER_FE_INGRESS, true,
6928                                                  0);
6929                 if (ret)
6930                         return ret;
6931         } else {
6932                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6933                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6934                                                  true, 0);
6935                 if (ret)
6936                         return ret;
6937         }
6938
6939         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6940
6941         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6942         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6943         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6944         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6945         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6946         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6947
6948         ret = hclge_set_vlan_protocol_type(hdev);
6949         if (ret)
6950                 return ret;
6951
6952         for (i = 0; i < hdev->num_alloc_vport; i++) {
6953                 u16 vlan_tag;
6954
6955                 vport = &hdev->vport[i];
6956                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
6957
6958                 ret = hclge_vlan_offload_cfg(vport,
6959                                              vport->port_base_vlan_cfg.state,
6960                                              vlan_tag);
6961                 if (ret)
6962                         return ret;
6963         }
6964
6965         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6966 }
6967
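/* Track a VLAN id in the vport's software VLAN list. hd_tbl_status records
 * whether the entry has also been written to the hardware filter table.
 */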
6968 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6969                                        bool writen_to_tbl)
6970 {
6971         struct hclge_vport_vlan_cfg *vlan;
6972
6973         /* vlan 0 is reserved */
6974         if (!vlan_id)
6975                 return;
6976
6977         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6978         if (!vlan)
6979                 return;
6980
6981         vlan->hd_tbl_status = writen_to_tbl;
6982         vlan->vlan_id = vlan_id;
6983
6984         list_add_tail(&vlan->node, &vport->vlan_list);
6985 }
6986
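/* Write every VLAN from the vport's software list that is not yet present
 * in hardware into the VLAN filter table, used to restore the vport's
 * VLAN entries.
 */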
6987 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
6988 {
6989         struct hclge_vport_vlan_cfg *vlan, *tmp;
6990         struct hclge_dev *hdev = vport->back;
6991         int ret;
6992
6993         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6994                 if (!vlan->hd_tbl_status) {
6995                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
6996                                                        vport->vport_id,
6997                                                        vlan->vlan_id, 0, false);
6998                         if (ret) {
6999                                 dev_err(&hdev->pdev->dev,
7000                                         "restore vport vlan list failed, ret=%d\n",
7001                                         ret);
7002                                 return ret;
7003                         }
7004                 }
7005                 vlan->hd_tbl_status = true;
7006         }
7007
7008         return 0;
7009 }
7010
7011 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7012                                       bool is_write_tbl)
7013 {
7014         struct hclge_vport_vlan_cfg *vlan, *tmp;
7015         struct hclge_dev *hdev = vport->back;
7016
7017         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7018                 if (vlan->vlan_id == vlan_id) {
7019                         if (is_write_tbl && vlan->hd_tbl_status)
7020                                 hclge_set_vlan_filter_hw(hdev,
7021                                                          htons(ETH_P_8021Q),
7022                                                          vport->vport_id,
7023                                                          vlan_id, 0,
7024                                                          true);
7025
7026                         list_del(&vlan->node);
7027                         kfree(vlan);
7028                         break;
7029                 }
7030         }
7031 }
7032
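/* Remove all of the vport's VLANs from the hardware filter table; when
 * is_del_list is true the software list entries are freed as well.
 */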
7033 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7034 {
7035         struct hclge_vport_vlan_cfg *vlan, *tmp;
7036         struct hclge_dev *hdev = vport->back;
7037
7038         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7039                 if (vlan->hd_tbl_status)
7040                         hclge_set_vlan_filter_hw(hdev,
7041                                                  htons(ETH_P_8021Q),
7042                                                  vport->vport_id,
7043                                                  vlan->vlan_id, 0,
7044                                                  true);
7045
7046                 vlan->hd_tbl_status = false;
7047                 if (is_del_list) {
7048                         list_del(&vlan->node);
7049                         kfree(vlan);
7050                 }
7051         }
7052 }
7053
7054 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7055 {
7056         struct hclge_vport_vlan_cfg *vlan, *tmp;
7057         struct hclge_vport *vport;
7058         int i;
7059
7060         mutex_lock(&hdev->vport_cfg_mutex);
7061         for (i = 0; i < hdev->num_alloc_vport; i++) {
7062                 vport = &hdev->vport[i];
7063                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7064                         list_del(&vlan->node);
7065                         kfree(vlan);
7066                 }
7067         }
7068         mutex_unlock(&hdev->vport_cfg_mutex);
7069 }
7070
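/* Enable or disable RX VLAN tag stripping. Which tag is stripped depends on
 * whether a port based VLAN is currently configured for the vport.
 */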
7071 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7072 {
7073         struct hclge_vport *vport = hclge_get_vport(handle);
7074
7075         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7076                 vport->rxvlan_cfg.strip_tag1_en = false;
7077                 vport->rxvlan_cfg.strip_tag2_en = enable;
7078         } else {
7079                 vport->rxvlan_cfg.strip_tag1_en = enable;
7080                 vport->rxvlan_cfg.strip_tag2_en = true;
7081         }
7082         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7083         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7084         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7085
7086         return hclge_set_vlan_rx_offload_cfg(vport);
7087 }
7088
7089 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7090                                             u16 port_base_vlan_state,
7091                                             struct hclge_vlan_info *new_info,
7092                                             struct hclge_vlan_info *old_info)
7093 {
7094         struct hclge_dev *hdev = vport->back;
7095         int ret;
7096
7097         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7098                 hclge_rm_vport_all_vlan_table(vport, false);
7099                 return hclge_set_vlan_filter_hw(hdev,
7100                                                  htons(new_info->vlan_proto),
7101                                                  vport->vport_id,
7102                                                  new_info->vlan_tag,
7103                                                  new_info->qos, false);
7104         }
7105
7106         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7107                                        vport->vport_id, old_info->vlan_tag,
7108                                        old_info->qos, true);
7109         if (ret)
7110                 return ret;
7111
7112         return hclge_add_vport_all_vlan_table(vport);
7113 }
7114
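/* Apply a new port based VLAN state to a vport: reprogram the TX/RX VLAN
 * offloads, swap the hardware filter entries between the port based VLAN
 * and the vport's own VLAN list as needed, and record the new vlan_info.
 */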
7115 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7116                                     struct hclge_vlan_info *vlan_info)
7117 {
7118         struct hnae3_handle *nic = &vport->nic;
7119         struct hclge_vlan_info *old_vlan_info;
7120         struct hclge_dev *hdev = vport->back;
7121         int ret;
7122
7123         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7124
7125         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7126         if (ret)
7127                 return ret;
7128
7129         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7130                 /* add new VLAN tag */
7131                 ret = hclge_set_vlan_filter_hw(hdev,
7132                                                htons(vlan_info->vlan_proto),
7133                                                vport->vport_id,
7134                                                vlan_info->vlan_tag,
7135                                                vlan_info->qos, false);
7136                 if (ret)
7137                         return ret;
7138
7139                 /* remove old VLAN tag */
7140                 ret = hclge_set_vlan_filter_hw(hdev,
7141                                                htons(old_vlan_info->vlan_proto),
7142                                                vport->vport_id,
7143                                                old_vlan_info->vlan_tag,
7144                                                old_vlan_info->qos, true);
7145                 if (ret)
7146                         return ret;
7147
7148                 goto update;
7149         }
7150
7151         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7152                                                old_vlan_info);
7153         if (ret)
7154                 return ret;
7155
7156         /* update state only when disabling/enabling port based VLAN */
7157         vport->port_base_vlan_cfg.state = state;
7158         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7159                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7160         else
7161                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7162
7163 update:
7164         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7165         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7166         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7167
7168         return 0;
7169 }
7170
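/* Map a requested VLAN onto the current port based VLAN state: no change,
 * enable, disable or modify.
 */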
7171 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7172                                           enum hnae3_port_base_vlan_state state,
7173                                           u16 vlan)
7174 {
7175         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7176                 if (!vlan)
7177                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7178                 else
7179                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7180         } else {
7181                 if (!vlan)
7182                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7183                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7184                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7185                 else
7186                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7187         }
7188 }
7189
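/* Configure a port based VLAN for a function. For the PF the change is
 * applied directly around a client down/up; for an alive VF the new state
 * is pushed to the VF instead.
 */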
7190 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7191                                     u16 vlan, u8 qos, __be16 proto)
7192 {
7193         struct hclge_vport *vport = hclge_get_vport(handle);
7194         struct hclge_dev *hdev = vport->back;
7195         struct hclge_vlan_info vlan_info;
7196         u16 state;
7197         int ret;
7198
7199         if (hdev->pdev->revision == 0x20)
7200                 return -EOPNOTSUPP;
7201
7202         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7203         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7204                 return -EINVAL;
7205         if (proto != htons(ETH_P_8021Q))
7206                 return -EPROTONOSUPPORT;
7207
7208         vport = &hdev->vport[vfid];
7209         state = hclge_get_port_base_vlan_state(vport,
7210                                                vport->port_base_vlan_cfg.state,
7211                                                vlan);
7212         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7213                 return 0;
7214
7215         vlan_info.vlan_tag = vlan;
7216         vlan_info.qos = qos;
7217         vlan_info.vlan_proto = ntohs(proto);
7218
7219         /* update port based VLAN for PF */
7220         if (!vfid) {
7221                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7222                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7223                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7224
7225                 return ret;
7226         }
7227
7228         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7229                 return hclge_update_port_base_vlan_cfg(vport, state,
7230                                                        &vlan_info);
7231         } else {
7232                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7233                                                         (u8)vfid, state,
7234                                                         vlan, qos,
7235                                                         ntohs(proto));
7236                 return ret;
7237         }
7238 }
7239
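/* Add or remove a VLAN filter entry for a vport and keep the software VLAN
 * list in sync; see the comment below on the port based VLAN interaction.
 */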
7240 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7241                           u16 vlan_id, bool is_kill)
7242 {
7243         struct hclge_vport *vport = hclge_get_vport(handle);
7244         struct hclge_dev *hdev = vport->back;
7245         bool writen_to_tbl = false;
7246         int ret = 0;
7247
7248         /* When port based VLAN is enabled, the port based VLAN is used as
7249          * the VLAN filter entry. In this case we don't update the VLAN
7250          * filter table when the user adds or removes a VLAN, only the vport
7251          * VLAN list. The VLAN ids in that list won't be written to the VLAN
7252          * filter table until port based VLAN is disabled.
7253          */
7254         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7255                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7256                                                vlan_id, 0, is_kill);
7257                 writen_to_tbl = true;
7258         }
7259
7260         if (ret)
7261                 return ret;
7262
7263         if (is_kill)
7264                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7265         else
7266                 hclge_add_vport_vlan_table(vport, vlan_id,
7267                                            writen_to_tbl);
7268
7269         return 0;
7270 }
7271
7272 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7273 {
7274         struct hclge_config_max_frm_size_cmd *req;
7275         struct hclge_desc desc;
7276
7277         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7278
7279         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7280         req->max_frm_size = cpu_to_le16(new_mps);
7281         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7282
7283         return hclge_cmd_send(&hdev->hw, &desc, 1);
7284 }
7285
7286 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7287 {
7288         struct hclge_vport *vport = hclge_get_vport(handle);
7289
7290         return hclge_set_vport_mtu(vport, new_mtu);
7291 }
7292
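/* Change a vport's MTU. A VF only updates its own mps (which must fit
 * within the PF's), while the PF reprograms the MAC frame size and
 * reallocates the packet buffers around a client down/up.
 */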
7293 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7294 {
7295         struct hclge_dev *hdev = vport->back;
7296         int i, max_frm_size, ret = 0;
7297
7298         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7299         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7300             max_frm_size > HCLGE_MAC_MAX_FRAME)
7301                 return -EINVAL;
7302
7303         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7304         mutex_lock(&hdev->vport_lock);
7305         /* VF's mps must fit within hdev->mps */
7306         if (vport->vport_id && max_frm_size > hdev->mps) {
7307                 mutex_unlock(&hdev->vport_lock);
7308                 return -EINVAL;
7309         } else if (vport->vport_id) {
7310                 vport->mps = max_frm_size;
7311                 mutex_unlock(&hdev->vport_lock);
7312                 return 0;
7313         }
7314
7315         /* PF's mps must be greater than VF's mps */
7316         for (i = 1; i < hdev->num_alloc_vport; i++)
7317                 if (max_frm_size < hdev->vport[i].mps) {
7318                         mutex_unlock(&hdev->vport_lock);
7319                         return -EINVAL;
7320                 }
7321
7322         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7323
7324         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7325         if (ret) {
7326                 dev_err(&hdev->pdev->dev,
7327                         "Change mtu fail, ret =%d\n", ret);
7328                 goto out;
7329         }
7330
7331         hdev->mps = max_frm_size;
7332         vport->mps = max_frm_size;
7333
7334         ret = hclge_buffer_alloc(hdev);
7335         if (ret)
7336                 dev_err(&hdev->pdev->dev,
7337                         "Allocate buffer fail, ret =%d\n", ret);
7338
7339 out:
7340         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7341         mutex_unlock(&hdev->vport_lock);
7342         return ret;
7343 }
7344
7345 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7346                                     bool enable)
7347 {
7348         struct hclge_reset_tqp_queue_cmd *req;
7349         struct hclge_desc desc;
7350         int ret;
7351
7352         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7353
7354         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7355         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7356         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7357
7358         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7359         if (ret) {
7360                 dev_err(&hdev->pdev->dev,
7361                         "Send tqp reset cmd error, status =%d\n", ret);
7362                 return ret;
7363         }
7364
7365         return 0;
7366 }
7367
7368 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7369 {
7370         struct hclge_reset_tqp_queue_cmd *req;
7371         struct hclge_desc desc;
7372         int ret;
7373
7374         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7375
7376         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7377         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7378
7379         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7380         if (ret) {
7381                 dev_err(&hdev->pdev->dev,
7382                         "Get reset status error, status =%d\n", ret);
7383                 return ret;
7384         }
7385
7386         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7387 }
7388
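/* Convert a queue id local to the handle into the global TQP index used by
 * the queue reset commands.
 */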
7389 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7390 {
7391         struct hnae3_queue *queue;
7392         struct hclge_tqp *tqp;
7393
7394         queue = handle->kinfo.tqp[queue_id];
7395         tqp = container_of(queue, struct hclge_tqp, q);
7396
7397         return tqp->index;
7398 }
7399
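/* Reset a single TQP: disable the queue, assert the queue reset, poll until
 * hardware reports it ready, then deassert the reset.
 */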
7400 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7401 {
7402         struct hclge_vport *vport = hclge_get_vport(handle);
7403         struct hclge_dev *hdev = vport->back;
7404         int reset_try_times = 0;
7405         int reset_status;
7406         u16 queue_gid;
7407         int ret = 0;
7408
7409         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7410
7411         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7412         if (ret) {
7413                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7414                 return ret;
7415         }
7416
7417         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7418         if (ret) {
7419                 dev_err(&hdev->pdev->dev,
7420                         "Send reset tqp cmd fail, ret = %d\n", ret);
7421                 return ret;
7422         }
7423
7424         reset_try_times = 0;
7425         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7426                 /* Wait for tqp hw reset */
7427                 msleep(20);
7428                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7429                 if (reset_status)
7430                         break;
7431         }
7432
7433         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7434                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7435                 return ret;
7436         }
7437
7438         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7439         if (ret)
7440                 dev_err(&hdev->pdev->dev,
7441                         "Deassert the soft reset fail, ret = %d\n", ret);
7442
7443         return ret;
7444 }
7445
7446 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7447 {
7448         struct hclge_dev *hdev = vport->back;
7449         int reset_try_times = 0;
7450         int reset_status;
7451         u16 queue_gid;
7452         int ret;
7453
7454         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7455
7456         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7457         if (ret) {
7458                 dev_warn(&hdev->pdev->dev,
7459                          "Send reset tqp cmd fail, ret = %d\n", ret);
7460                 return;
7461         }
7462
7463         reset_try_times = 0;
7464         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7465                 /* Wait for tqp hw reset */
7466                 msleep(20);
7467                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7468                 if (reset_status)
7469                         break;
7470         }
7471
7472         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7473                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7474                 return;
7475         }
7476
7477         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7478         if (ret)
7479                 dev_warn(&hdev->pdev->dev,
7480                          "Deassert the soft reset fail, ret = %d\n", ret);
7481 }
7482
7483 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7484 {
7485         struct hclge_vport *vport = hclge_get_vport(handle);
7486         struct hclge_dev *hdev = vport->back;
7487
7488         return hdev->fw_version;
7489 }
7490
7491 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7492 {
7493         struct phy_device *phydev = hdev->hw.mac.phydev;
7494
7495         if (!phydev)
7496                 return;
7497
7498         phy_set_asym_pause(phydev, rx_en, tx_en);
7499 }
7500
7501 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7502 {
7503         int ret;
7504
7505         if (rx_en && tx_en)
7506                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7507         else if (rx_en && !tx_en)
7508                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7509         else if (!rx_en && tx_en)
7510                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7511         else
7512                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7513
7514         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7515                 return 0;
7516
7517         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7518         if (ret) {
7519                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7520                         ret);
7521                 return ret;
7522         }
7523
7524         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7525
7526         return 0;
7527 }
7528
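/* Resolve the flow control mode negotiated by the PHY from the local and
 * remote pause advertisements and apply it to the MAC.
 */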
7529 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7530 {
7531         struct phy_device *phydev = hdev->hw.mac.phydev;
7532         u16 remote_advertising = 0;
7533         u16 local_advertising = 0;
7534         u32 rx_pause, tx_pause;
7535         u8 flowctl;
7536
7537         if (!phydev->link || !phydev->autoneg)
7538                 return 0;
7539
7540         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7541
7542         if (phydev->pause)
7543                 remote_advertising = LPA_PAUSE_CAP;
7544
7545         if (phydev->asym_pause)
7546                 remote_advertising |= LPA_PAUSE_ASYM;
7547
7548         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7549                                            remote_advertising);
7550         tx_pause = flowctl & FLOW_CTRL_TX;
7551         rx_pause = flowctl & FLOW_CTRL_RX;
7552
7553         if (phydev->duplex == HCLGE_MAC_HALF) {
7554                 tx_pause = 0;
7555                 rx_pause = 0;
7556         }
7557
7558         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7559 }
7560
7561 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7562                                  u32 *rx_en, u32 *tx_en)
7563 {
7564         struct hclge_vport *vport = hclge_get_vport(handle);
7565         struct hclge_dev *hdev = vport->back;
7566
7567         *auto_neg = hclge_get_autoneg(handle);
7568
7569         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7570                 *rx_en = 0;
7571                 *tx_en = 0;
7572                 return;
7573         }
7574
7575         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7576                 *rx_en = 1;
7577                 *tx_en = 0;
7578         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7579                 *tx_en = 1;
7580                 *rx_en = 0;
7581         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7582                 *rx_en = 1;
7583                 *tx_en = 1;
7584         } else {
7585                 *rx_en = 0;
7586                 *tx_en = 0;
7587         }
7588 }
7589
7590 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7591                                 u32 rx_en, u32 tx_en)
7592 {
7593         struct hclge_vport *vport = hclge_get_vport(handle);
7594         struct hclge_dev *hdev = vport->back;
7595         struct phy_device *phydev = hdev->hw.mac.phydev;
7596         u32 fc_autoneg;
7597
7598         fc_autoneg = hclge_get_autoneg(handle);
7599         if (auto_neg != fc_autoneg) {
7600                 dev_info(&hdev->pdev->dev,
7601                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7602                 return -EOPNOTSUPP;
7603         }
7604
7605         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7606                 dev_info(&hdev->pdev->dev,
7607                          "Priority flow control enabled. Cannot set link flow control.\n");
7608                 return -EOPNOTSUPP;
7609         }
7610
7611         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7612
7613         if (!fc_autoneg)
7614                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7615
7616         /* Only support flow control negotiation for netdev with
7617          * phy attached for now.
7618          */
7619         if (!phydev)
7620                 return -EOPNOTSUPP;
7621
7622         return phy_start_aneg(phydev);
7623 }
7624
7625 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7626                                           u8 *auto_neg, u32 *speed, u8 *duplex)
7627 {
7628         struct hclge_vport *vport = hclge_get_vport(handle);
7629         struct hclge_dev *hdev = vport->back;
7630
7631         if (speed)
7632                 *speed = hdev->hw.mac.speed;
7633         if (duplex)
7634                 *duplex = hdev->hw.mac.duplex;
7635         if (auto_neg)
7636                 *auto_neg = hdev->hw.mac.autoneg;
7637 }
7638
7639 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
7640                                  u8 *module_type)
7641 {
7642         struct hclge_vport *vport = hclge_get_vport(handle);
7643         struct hclge_dev *hdev = vport->back;
7644
7645         if (media_type)
7646                 *media_type = hdev->hw.mac.media_type;
7647
7648         if (module_type)
7649                 *module_type = hdev->hw.mac.module_type;
7650 }
7651
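/* Read the MDI-X control and status from the PHY's MDIX page and translate
 * them into the ethtool ETH_TP_MDI* values.
 */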
7652 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7653                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7654 {
7655         struct hclge_vport *vport = hclge_get_vport(handle);
7656         struct hclge_dev *hdev = vport->back;
7657         struct phy_device *phydev = hdev->hw.mac.phydev;
7658         int mdix_ctrl, mdix, retval, is_resolved;
7659
7660         if (!phydev) {
7661                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7662                 *tp_mdix = ETH_TP_MDI_INVALID;
7663                 return;
7664         }
7665
7666         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7667
7668         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7669         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7670                                     HCLGE_PHY_MDIX_CTRL_S);
7671
7672         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7673         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7674         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7675
7676         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7677
7678         switch (mdix_ctrl) {
7679         case 0x0:
7680                 *tp_mdix_ctrl = ETH_TP_MDI;
7681                 break;
7682         case 0x1:
7683                 *tp_mdix_ctrl = ETH_TP_MDI_X;
7684                 break;
7685         case 0x3:
7686                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7687                 break;
7688         default:
7689                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7690                 break;
7691         }
7692
7693         if (!is_resolved)
7694                 *tp_mdix = ETH_TP_MDI_INVALID;
7695         else if (mdix)
7696                 *tp_mdix = ETH_TP_MDI_X;
7697         else
7698                 *tp_mdix = ETH_TP_MDI;
7699 }
7700
7701 static void hclge_info_show(struct hclge_dev *hdev)
7702 {
7703         struct device *dev = &hdev->pdev->dev;
7704
7705         dev_info(dev, "PF info begin:\n");
7706
7707         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
7708         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
7709         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
7710         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
7711         dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
7712         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
7713         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
7714         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
7715         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
7716         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
7717         dev_info(dev, "This is %s PF\n",
7718                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
7719         dev_info(dev, "DCB %s\n",
7720                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
7721         dev_info(dev, "MQPRIO %s\n",
7722                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
7723
7724         dev_info(dev, "PF info end.\n");
7725 }
7726
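/* Instantiate a registered client (KNIC/UNIC/RoCE) on each vport. The RoCE
 * client is only initialized once both the NIC client and RoCE support are
 * present.
 */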
7727 static int hclge_init_client_instance(struct hnae3_client *client,
7728                                       struct hnae3_ae_dev *ae_dev)
7729 {
7730         struct hclge_dev *hdev = ae_dev->priv;
7731         struct hclge_vport *vport;
7732         int i, ret;
7733
7734         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
7735                 vport = &hdev->vport[i];
7736
7737                 switch (client->type) {
7738                 case HNAE3_CLIENT_KNIC:
7739
7740                         hdev->nic_client = client;
7741                         vport->nic.client = client;
7742                         ret = client->ops->init_instance(&vport->nic);
7743                         if (ret)
7744                                 goto clear_nic;
7745
7746                         hnae3_set_client_init_flag(client, ae_dev, 1);
7747
7748                         if (netif_msg_drv(&hdev->vport->nic))
7749                                 hclge_info_show(hdev);
7750
7751                         if (hdev->roce_client &&
7752                             hnae3_dev_roce_supported(hdev)) {
7753                                 struct hnae3_client *rc = hdev->roce_client;
7754
7755                                 ret = hclge_init_roce_base_info(vport);
7756                                 if (ret)
7757                                         goto clear_roce;
7758
7759                                 ret = rc->ops->init_instance(&vport->roce);
7760                                 if (ret)
7761                                         goto clear_roce;
7762
7763                                 hnae3_set_client_init_flag(hdev->roce_client,
7764                                                            ae_dev, 1);
7765                         }
7766
7767                         break;
7768                 case HNAE3_CLIENT_UNIC:
7769                         hdev->nic_client = client;
7770                         vport->nic.client = client;
7771
7772                         ret = client->ops->init_instance(&vport->nic);
7773                         if (ret)
7774                                 goto clear_nic;
7775
7776                         hnae3_set_client_init_flag(client, ae_dev, 1);
7777
7778                         break;
7779                 case HNAE3_CLIENT_ROCE:
7780                         if (hnae3_dev_roce_supported(hdev)) {
7781                                 hdev->roce_client = client;
7782                                 vport->roce.client = client;
7783                         }
7784
7785                         if (hdev->roce_client && hdev->nic_client) {
7786                                 ret = hclge_init_roce_base_info(vport);
7787                                 if (ret)
7788                                         goto clear_roce;
7789
7790                                 ret = client->ops->init_instance(&vport->roce);
7791                                 if (ret)
7792                                         goto clear_roce;
7793
7794                                 hnae3_set_client_init_flag(client, ae_dev, 1);
7795                         }
7796
7797                         break;
7798                 default:
7799                         return -EINVAL;
7800                 }
7801         }
7802
7803         return 0;
7804
7805 clear_nic:
7806         hdev->nic_client = NULL;
7807         vport->nic.client = NULL;
7808         return ret;
7809 clear_roce:
7810         hdev->roce_client = NULL;
7811         vport->roce.client = NULL;
7812         return ret;
7813 }
7814
7815 static void hclge_uninit_client_instance(struct hnae3_client *client,
7816                                          struct hnae3_ae_dev *ae_dev)
7817 {
7818         struct hclge_dev *hdev = ae_dev->priv;
7819         struct hclge_vport *vport;
7820         int i;
7821
7822         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7823                 vport = &hdev->vport[i];
7824                 if (hdev->roce_client) {
7825                         hdev->roce_client->ops->uninit_instance(&vport->roce,
7826                                                                 0);
7827                         hdev->roce_client = NULL;
7828                         vport->roce.client = NULL;
7829                 }
7830                 if (client->type == HNAE3_CLIENT_ROCE)
7831                         return;
7832                 if (hdev->nic_client && client->ops->uninit_instance) {
7833                         client->ops->uninit_instance(&vport->nic, 0);
7834                         hdev->nic_client = NULL;
7835                         vport->nic.client = NULL;
7836                 }
7837         }
7838 }
7839
7840 static int hclge_pci_init(struct hclge_dev *hdev)
7841 {
7842         struct pci_dev *pdev = hdev->pdev;
7843         struct hclge_hw *hw;
7844         int ret;
7845
7846         ret = pci_enable_device(pdev);
7847         if (ret) {
7848                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7849                 return ret;
7850         }
7851
7852         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7853         if (ret) {
7854                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7855                 if (ret) {
7856                         dev_err(&pdev->dev,
7857                                 "can't set consistent PCI DMA");
7858                         goto err_disable_device;
7859                 }
7860                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7861         }
7862
7863         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7864         if (ret) {
7865                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7866                 goto err_disable_device;
7867         }
7868
7869         pci_set_master(pdev);
7870         hw = &hdev->hw;
7871         hw->io_base = pcim_iomap(pdev, 2, 0);
7872         if (!hw->io_base) {
7873                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7874                 ret = -ENOMEM;
7875                 goto err_clr_master;
7876         }
7877
7878         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7879
7880         return 0;
7881 err_clr_master:
7882         pci_clear_master(pdev);
7883         pci_release_regions(pdev);
7884 err_disable_device:
7885         pci_disable_device(pdev);
7886
7887         return ret;
7888 }
7889
7890 static void hclge_pci_uninit(struct hclge_dev *hdev)
7891 {
7892         struct pci_dev *pdev = hdev->pdev;
7893
7894         pcim_iounmap(pdev, hdev->hw.io_base);
7895         pci_free_irq_vectors(pdev);
7896         pci_clear_master(pdev);
7897         pci_release_mem_regions(pdev);
7898         pci_disable_device(pdev);
7899 }
7900
7901 static void hclge_state_init(struct hclge_dev *hdev)
7902 {
7903         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7904         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7905         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7906         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7907         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7908         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7909 }
7910
7911 static void hclge_state_uninit(struct hclge_dev *hdev)
7912 {
7913         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7914
7915         if (hdev->service_timer.function)
7916                 del_timer_sync(&hdev->service_timer);
7917         if (hdev->reset_timer.function)
7918                 del_timer_sync(&hdev->reset_timer);
7919         if (hdev->service_task.func)
7920                 cancel_work_sync(&hdev->service_task);
7921         if (hdev->rst_service_task.func)
7922                 cancel_work_sync(&hdev->rst_service_task);
7923         if (hdev->mbx_service_task.func)
7924                 cancel_work_sync(&hdev->mbx_service_task);
7925 }
7926
7927 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7928 {
7929 #define HCLGE_FLR_WAIT_MS       100
7930 #define HCLGE_FLR_WAIT_CNT      50
7931         struct hclge_dev *hdev = ae_dev->priv;
7932         int cnt = 0;
7933
7934         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7935         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7936         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7937         hclge_reset_event(hdev->pdev, NULL);
7938
7939         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7940                cnt++ < HCLGE_FLR_WAIT_CNT)
7941                 msleep(HCLGE_FLR_WAIT_MS);
7942
7943         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7944                 dev_err(&hdev->pdev->dev,
7945                         "flr wait down timeout: %d\n", cnt);
7946 }
7947
7948 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7949 {
7950         struct hclge_dev *hdev = ae_dev->priv;
7951
7952         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7953 }
7954
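/* Main PF initialization: bring up PCI, the command queue, interrupts,
 * TQPs/vports, MAC, VLAN, TM, RSS and the flow director, then arm the
 * service tasks and enable the misc vector.
 */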
7955 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7956 {
7957         struct pci_dev *pdev = ae_dev->pdev;
7958         struct hclge_dev *hdev;
7959         int ret;
7960
7961         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7962         if (!hdev) {
7963                 ret = -ENOMEM;
7964                 goto out;
7965         }
7966
7967         hdev->pdev = pdev;
7968         hdev->ae_dev = ae_dev;
7969         hdev->reset_type = HNAE3_NONE_RESET;
7970         hdev->reset_level = HNAE3_FUNC_RESET;
7971         ae_dev->priv = hdev;
7972         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7973
7974         mutex_init(&hdev->vport_lock);
7975         mutex_init(&hdev->vport_cfg_mutex);
7976
7977         ret = hclge_pci_init(hdev);
7978         if (ret) {
7979                 dev_err(&pdev->dev, "PCI init failed\n");
7980                 goto out;
7981         }
7982
7983         /* Initialize the firmware command queue */
7984         ret = hclge_cmd_queue_init(hdev);
7985         if (ret) {
7986                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7987                 goto err_pci_uninit;
7988         }
7989
7990         /* Firmware command initialization */
7991         ret = hclge_cmd_init(hdev);
7992         if (ret)
7993                 goto err_cmd_uninit;
7994
7995         ret = hclge_get_cap(hdev);
7996         if (ret) {
7997                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7998                         ret);
7999                 goto err_cmd_uninit;
8000         }
8001
8002         ret = hclge_configure(hdev);
8003         if (ret) {
8004                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8005                 goto err_cmd_uninit;
8006         }
8007
8008         ret = hclge_init_msi(hdev);
8009         if (ret) {
8010                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8011                 goto err_cmd_uninit;
8012         }
8013
8014         ret = hclge_misc_irq_init(hdev);
8015         if (ret) {
8016                 dev_err(&pdev->dev,
8017                         "Misc IRQ(vector0) init error, ret = %d.\n",
8018                         ret);
8019                 goto err_msi_uninit;
8020         }
8021
8022         ret = hclge_alloc_tqps(hdev);
8023         if (ret) {
8024                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8025                 goto err_msi_irq_uninit;
8026         }
8027
8028         ret = hclge_alloc_vport(hdev);
8029         if (ret) {
8030                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8031                 goto err_msi_irq_uninit;
8032         }
8033
8034         ret = hclge_map_tqp(hdev);
8035         if (ret) {
8036                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8037                 goto err_msi_irq_uninit;
8038         }
8039
8040         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8041                 ret = hclge_mac_mdio_config(hdev);
8042                 if (ret) {
8043                         dev_err(&hdev->pdev->dev,
8044                                 "mdio config fail ret=%d\n", ret);
8045                         goto err_msi_irq_uninit;
8046                 }
8047         }
8048
8049         ret = hclge_init_umv_space(hdev);
8050         if (ret) {
8051                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8052                 goto err_mdiobus_unreg;
8053         }
8054
8055         ret = hclge_mac_init(hdev);
8056         if (ret) {
8057                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8058                 goto err_mdiobus_unreg;
8059         }
8060
8061         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8062         if (ret) {
8063                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8064                 goto err_mdiobus_unreg;
8065         }
8066
8067         ret = hclge_config_gro(hdev, true);
8068         if (ret)
8069                 goto err_mdiobus_unreg;
8070
8071         ret = hclge_init_vlan_config(hdev);
8072         if (ret) {
8073                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8074                 goto err_mdiobus_unreg;
8075         }
8076
8077         ret = hclge_tm_schd_init(hdev);
8078         if (ret) {
8079                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8080                 goto err_mdiobus_unreg;
8081         }
8082
8083         hclge_rss_init_cfg(hdev);
8084         ret = hclge_rss_init_hw(hdev);
8085         if (ret) {
8086                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8087                 goto err_mdiobus_unreg;
8088         }
8089
8090         ret = init_mgr_tbl(hdev);
8091         if (ret) {
8092                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8093                 goto err_mdiobus_unreg;
8094         }
8095
8096         ret = hclge_init_fd_config(hdev);
8097         if (ret) {
8098                 dev_err(&pdev->dev,
8099                         "fd table init fail, ret=%d\n", ret);
8100                 goto err_mdiobus_unreg;
8101         }
8102
8103         ret = hclge_hw_error_set_state(hdev, true);
8104         if (ret) {
8105                 dev_err(&pdev->dev,
8106                         "fail(%d) to enable hw error interrupts\n", ret);
8107                 goto err_mdiobus_unreg;
8108         }
8109
8110         INIT_KFIFO(hdev->mac_tnl_log);
8111
8112         hclge_dcb_ops_set(hdev);
8113
8114         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8115         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8116         INIT_WORK(&hdev->service_task, hclge_service_task);
8117         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8118         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8119
8120         hclge_clear_all_event_cause(hdev);
8121
8122         /* Enable MISC vector(vector0) */
8123         hclge_enable_vector(&hdev->misc_vector, true);
8124
8125         hclge_state_init(hdev);
8126         hdev->last_reset_time = jiffies;
8127
8128         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8129         return 0;
8130
8131 err_mdiobus_unreg:
8132         if (hdev->hw.mac.phydev)
8133                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8134 err_msi_irq_uninit:
8135         hclge_misc_irq_uninit(hdev);
8136 err_msi_uninit:
8137         pci_free_irq_vectors(pdev);
8138 err_cmd_uninit:
8139         hclge_cmd_uninit(hdev);
8140 err_pci_uninit:
8141         pcim_iounmap(pdev, hdev->hw.io_base);
8142         pci_clear_master(pdev);
8143         pci_release_regions(pdev);
8144         pci_disable_device(pdev);
8145 out:
8146         return ret;
8147 }
8148
8149 static void hclge_stats_clear(struct hclge_dev *hdev)
8150 {
8151         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8152 }
8153
8154 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8155 {
8156         struct hclge_vport *vport = hdev->vport;
8157         int i;
8158
8159         for (i = 0; i < hdev->num_alloc_vport; i++) {
8160                 hclge_vport_stop(vport);
8161                 vport++;
8162         }
8163 }
8164
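/* Re-initialize the hardware after a reset. Resources allocated at probe
 * time are kept; only the hardware configuration is replayed.
 */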
8165 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8166 {
8167         struct hclge_dev *hdev = ae_dev->priv;
8168         struct pci_dev *pdev = ae_dev->pdev;
8169         int ret;
8170
8171         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8172
8173         hclge_stats_clear(hdev);
8174         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8175
8176         ret = hclge_cmd_init(hdev);
8177         if (ret) {
8178                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8179                 return ret;
8180         }
8181
8182         ret = hclge_map_tqp(hdev);
8183         if (ret) {
8184                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8185                 return ret;
8186         }
8187
8188         hclge_reset_umv_space(hdev);
8189
8190         ret = hclge_mac_init(hdev);
8191         if (ret) {
8192                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8193                 return ret;
8194         }
8195
8196         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8197         if (ret) {
8198                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8199                 return ret;
8200         }
8201
8202         ret = hclge_config_gro(hdev, true);
8203         if (ret)
8204                 return ret;
8205
8206         ret = hclge_init_vlan_config(hdev);
8207         if (ret) {
8208                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8209                 return ret;
8210         }
8211
8212         ret = hclge_tm_init_hw(hdev, true);
8213         if (ret) {
8214                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8215                 return ret;
8216         }
8217
8218         ret = hclge_rss_init_hw(hdev);
8219         if (ret) {
8220                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8221                 return ret;
8222         }
8223
8224         ret = hclge_init_fd_config(hdev);
8225         if (ret) {
8226                 dev_err(&pdev->dev,
8227                         "fd table init fail, ret=%d\n", ret);
8228                 return ret;
8229         }
8230
8231         /* Re-enable the hw error interrupts because
8232          * the interrupts get disabled on core/global reset.
8233          */
8234         ret = hclge_hw_error_set_state(hdev, true);
8235         if (ret) {
8236                 dev_err(&pdev->dev,
8237                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8238                 return ret;
8239         }
8240
8241         hclge_reset_vport_state(hdev);
8242
8243         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8244                  HCLGE_DRIVER_NAME);
8245
8246         return 0;
8247 }
8248
8249 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8250 {
8251         struct hclge_dev *hdev = ae_dev->priv;
8252         struct hclge_mac *mac = &hdev->hw.mac;
8253
8254         hclge_state_uninit(hdev);
8255
8256         if (mac->phydev)
8257                 mdiobus_unregister(mac->mdio_bus);
8258
8259         hclge_uninit_umv_space(hdev);
8260
8261         /* Disable MISC vector(vector0) */
8262         hclge_enable_vector(&hdev->misc_vector, false);
8263         synchronize_irq(hdev->misc_vector.vector_irq);
8264
8265         hclge_config_mac_tnl_int(hdev, false);
8266         hclge_hw_error_set_state(hdev, false);
8267         hclge_cmd_uninit(hdev);
8268         hclge_misc_irq_uninit(hdev);
8269         hclge_pci_uninit(hdev);
8270         mutex_destroy(&hdev->vport_lock);
8271         hclge_uninit_vport_mac_table(hdev);
8272         hclge_uninit_vport_vlan_table(hdev);
8273         mutex_destroy(&hdev->vport_cfg_mutex);
8274         ae_dev->priv = NULL;
8275 }
8276
8277 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8278 {
8279         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8280         struct hclge_vport *vport = hclge_get_vport(handle);
8281         struct hclge_dev *hdev = vport->back;
8282
8283         return min_t(u32, hdev->rss_size_max,
8284                      vport->alloc_tqps / kinfo->num_tc);
8285 }
8286
8287 static void hclge_get_channels(struct hnae3_handle *handle,
8288                                struct ethtool_channels *ch)
8289 {
8290         ch->max_combined = hclge_get_max_channels(handle);
8291         ch->other_count = 1;
8292         ch->max_other = 1;
8293         ch->combined_count = handle->kinfo.rss_size;
8294 }
8295
8296 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8297                                         u16 *alloc_tqps, u16 *max_rss_size)
8298 {
8299         struct hclge_vport *vport = hclge_get_vport(handle);
8300         struct hclge_dev *hdev = vport->back;
8301
8302         *alloc_tqps = vport->alloc_tqps;
8303         *max_rss_size = hdev->rss_size_max;
8304 }
8305
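/* Change the number of channels: update the vport's TQP mapping, rewrite
 * the RSS TC mode and, unless the user has configured one, rebuild the RSS
 * indirection table for the new rss_size.
 */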
8306 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8307                               bool rxfh_configured)
8308 {
8309         struct hclge_vport *vport = hclge_get_vport(handle);
8310         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8311         struct hclge_dev *hdev = vport->back;
8312         int cur_rss_size = kinfo->rss_size;
8313         int cur_tqps = kinfo->num_tqps;
8314         u16 tc_offset[HCLGE_MAX_TC_NUM];
8315         u16 tc_valid[HCLGE_MAX_TC_NUM];
8316         u16 tc_size[HCLGE_MAX_TC_NUM];
8317         u16 roundup_size;
8318         u32 *rss_indir;
8319         int ret, i;
8320
8321         kinfo->req_rss_size = new_tqps_num;
8322
8323         ret = hclge_tm_vport_map_update(hdev);
8324         if (ret) {
8325                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8326                 return ret;
8327         }
8328
8329         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8330         roundup_size = ilog2(roundup_size);
8331         /* Set the RSS TC mode according to the new RSS size */
8332         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8333                 tc_valid[i] = 0;
8334
8335                 if (!(hdev->hw_tc_map & BIT(i)))
8336                         continue;
8337
8338                 tc_valid[i] = 1;
8339                 tc_size[i] = roundup_size;
8340                 tc_offset[i] = kinfo->rss_size * i;
8341         }
8342         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8343         if (ret)
8344                 return ret;
8345
8346         /* RSS indirection table has been configured by user */
8347         if (rxfh_configured)
8348                 goto out;
8349
8350         /* Reinitialize the RSS indirection table according to the new RSS size */
8351         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8352         if (!rss_indir)
8353                 return -ENOMEM;
8354
8355         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8356                 rss_indir[i] = i % kinfo->rss_size;
8357
8358         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8359         if (ret)
8360                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8361                         ret);
8362
8363         kfree(rss_indir);
8364
8365 out:
8366         if (!ret)
8367                 dev_info(&hdev->pdev->dev,
8368                          "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8369                          cur_rss_size, kinfo->rss_size,
8370                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8371
8372         return ret;
8373 }
8374
8375 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8376                               u32 *regs_num_64_bit)
8377 {
8378         struct hclge_desc desc;
8379         u32 total_num;
8380         int ret;
8381
8382         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8383         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8384         if (ret) {
8385                 dev_err(&hdev->pdev->dev,
8386                         "Query register number cmd failed, ret = %d.\n", ret);
8387                 return ret;
8388         }
8389
8390         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8391         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8392
8393         total_num = *regs_num_32_bit + *regs_num_64_bit;
8394         if (!total_num)
8395                 return -EINVAL;
8396
8397         return 0;
8398 }
8399
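/* Dump the 32 bit registers. The first descriptor returns two fewer data
 * words than the following ones, hence the "+ 2" when computing the
 * descriptor count.
 */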
8400 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8401                                  void *data)
8402 {
8403 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8404
8405         struct hclge_desc *desc;
8406         u32 *reg_val = data;
8407         __le32 *desc_data;
8408         int cmd_num;
8409         int i, k, n;
8410         int ret;
8411
8412         if (regs_num == 0)
8413                 return 0;
8414
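             /* Each descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit
              * words, but the first descriptor keeps its two-word command
              * header, so two extra words are budgeted here and skipped again
              * in the copy loop below.
              */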
8415         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8416         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8417         if (!desc)
8418                 return -ENOMEM;
8419
8420         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8421         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8422         if (ret) {
8423                 dev_err(&hdev->pdev->dev,
8424                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8425                 kfree(desc);
8426                 return ret;
8427         }
8428
8429         for (i = 0; i < cmd_num; i++) {
8430                 if (i == 0) {
8431                         desc_data = (__le32 *)(&desc[i].data[0]);
8432                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8433                 } else {
8434                         desc_data = (__le32 *)(&desc[i]);
8435                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8436                 }
8437                 for (k = 0; k < n; k++) {
8438                         *reg_val++ = le32_to_cpu(*desc_data++);
8439
8440                         regs_num--;
8441                         if (!regs_num)
8442                                 break;
8443                 }
8444         }
8445
8446         kfree(desc);
8447         return 0;
8448 }
8449
8450 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8451                                  void *data)
8452 {
8453 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8454
8455         struct hclge_desc *desc;
8456         u64 *reg_val = data;
8457         __le64 *desc_data;
8458         int cmd_num;
8459         int i, k, n;
8460         int ret;
8461
8462         if (regs_num == 0)
8463                 return 0;
8464
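             /* Same layout as the 32-bit query: a descriptor holds four 64-bit
              * words and the first descriptor's 8-byte header occupies one of
              * them, hence the "+ 1" here and the "- 1" below.
              */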
8465         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8466         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8467         if (!desc)
8468                 return -ENOMEM;
8469
8470         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8471         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8472         if (ret) {
8473                 dev_err(&hdev->pdev->dev,
8474                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8475                 kfree(desc);
8476                 return ret;
8477         }
8478
8479         for (i = 0; i < cmd_num; i++) {
8480                 if (i == 0) {
8481                         desc_data = (__le64 *)(&desc[i].data[0]);
8482                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8483                 } else {
8484                         desc_data = (__le64 *)(&desc[i]);
8485                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8486                 }
8487                 for (k = 0; k < n; k++) {
8488                         *reg_val++ = le64_to_cpu(*desc_data++);
8489
8490                         regs_num--;
8491                         if (!regs_num)
8492                                 break;
8493                 }
8494         }
8495
8496         kfree(desc);
8497         return 0;
8498 }
8499
8500 #define MAX_SEPARATE_NUM        4
8501 #define SEPARATOR_VALUE         0xFFFFFFFF
8502 #define REG_NUM_PER_LINE        4
8503 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8504
8505 static int hclge_get_regs_len(struct hnae3_handle *handle)
8506 {
8507         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8508         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8509         struct hclge_vport *vport = hclge_get_vport(handle);
8510         struct hclge_dev *hdev = vport->back;
8511         u32 regs_num_32_bit, regs_num_64_bit;
8512         int ret;
8513
8514         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8515         if (ret) {
8516                 dev_err(&hdev->pdev->dev,
8517                         "Get register number failed, ret = %d.\n", ret);
8518                 return -EOPNOTSUPP;
8519         }
8520
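             /* Every dumped block is padded with separator words up to a whole
              * REG_LEN_PER_LINE line, so reserve one extra line per block.
              */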
8521         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8522         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8523         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8524         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8525
8526         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8527                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8528                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8529 }
8530
8531 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8532                            void *data)
8533 {
8534         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8535         struct hclge_vport *vport = hclge_get_vport(handle);
8536         struct hclge_dev *hdev = vport->back;
8537         u32 regs_num_32_bit, regs_num_64_bit;
8538         int i, j, reg_num, separator_num;
8539         u32 *reg = data;
8540         int ret;
8541
8542         *version = hdev->fw_version;
8543
8544         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8545         if (ret) {
8546                 dev_err(&hdev->pdev->dev,
8547                         "Get register number failed, ret = %d.\n", ret);
8548                 return;
8549         }
8550
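             /* Each block below is padded with one to four SEPARATOR_VALUE
              * words so that its length is a multiple of REG_NUM_PER_LINE,
              * matching the size reported by hclge_get_regs_len().
              */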
8551         /* fetching per-PF register values from PF PCIe register space */
8552         reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8553         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
8554         for (i = 0; i < reg_num; i++)
8555                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8556         for (i = 0; i < separator_num; i++)
8557                 *reg++ = SEPARATOR_VALUE;
8558
8559         reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
8560         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
8561         for (i = 0; i < reg_num; i++)
8562                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8563         for (i = 0; i < separator_num; i++)
8564                 *reg++ = SEPARATOR_VALUE;
8565
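             /* Per-queue (TQP) ring registers are replicated at a 0x200 byte
              * stride in the PF register space.
              */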
8566         reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
8567         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
8568         for (j = 0; j < kinfo->num_tqps; j++) {
8569                 for (i = 0; i < reg_num; i++)
8570                         *reg++ = hclge_read_dev(&hdev->hw,
8571                                                 ring_reg_addr_list[i] +
8572                                                 0x200 * j);
8573                 for (i = 0; i < separator_num; i++)
8574                         *reg++ = SEPARATOR_VALUE;
8575         }
8576
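             /* TQP interrupt registers are replicated per vector at a 4 byte
              * stride; the misc vector is not a TQP vector, hence the
              * "num_msi_used - 1" loop bound.
              */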
8577         reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8578         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
8579         for (j = 0; j < hdev->num_msi_used - 1; j++) {
8580                 for (i = 0; i < reg_num; i++)
8581                         *reg++ = hclge_read_dev(&hdev->hw,
8582                                                 tqp_intr_reg_addr_list[i] +
8583                                                 4 * j);
8584                 for (i = 0; i < separator_num; i++)
8585                         *reg++ = SEPARATOR_VALUE;
8586         }
8587
8588         /* fetching PF common register values from firmware */
8589         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8590         if (ret) {
8591                 dev_err(&hdev->pdev->dev,
8592                         "Get 32 bit register failed, ret = %d.\n", ret);
8593                 return;
8594         }
8595
8596         reg += regs_num_32_bit;
8597         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8598         if (ret)
8599                 dev_err(&hdev->pdev->dev,
8600                         "Get 64 bit register failed, ret = %d.\n", ret);
8601 }
8602
8603 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8604 {
8605         struct hclge_set_led_state_cmd *req;
8606         struct hclge_desc desc;
8607         int ret;
8608
8609         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8610
8611         req = (struct hclge_set_led_state_cmd *)desc.data;
8612         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8613                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8614
8615         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8616         if (ret)
8617                 dev_err(&hdev->pdev->dev,
8618                         "Send set led state cmd error, ret = %d\n", ret);
8619
8620         return ret;
8621 }
8622
8623 enum hclge_led_status {
8624         HCLGE_LED_OFF,
8625         HCLGE_LED_ON,
8626         HCLGE_LED_NO_CHANGE = 0xFF,
8627 };
8628
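     /* Implement ethtool's port identification (ethtool -p):
      * ETHTOOL_ID_ACTIVE lights the locate LED, ETHTOOL_ID_INACTIVE turns it
      * off again.
      */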
8629 static int hclge_set_led_id(struct hnae3_handle *handle,
8630                             enum ethtool_phys_id_state status)
8631 {
8632         struct hclge_vport *vport = hclge_get_vport(handle);
8633         struct hclge_dev *hdev = vport->back;
8634
8635         switch (status) {
8636         case ETHTOOL_ID_ACTIVE:
8637                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
8638         case ETHTOOL_ID_INACTIVE:
8639                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8640         default:
8641                 return -EINVAL;
8642         }
8643 }
8644
8645 static void hclge_get_link_mode(struct hnae3_handle *handle,
8646                                 unsigned long *supported,
8647                                 unsigned long *advertising)
8648 {
8649         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8650         struct hclge_vport *vport = hclge_get_vport(handle);
8651         struct hclge_dev *hdev = vport->back;
8652         unsigned int idx = 0;
8653
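             /* supported/advertising are ethtool link-mode bitmaps; copy them
              * one long at a time from the MAC state kept in hdev.
              */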
8654         for (; idx < size; idx++) {
8655                 supported[idx] = hdev->hw.mac.supported[idx];
8656                 advertising[idx] = hdev->hw.mac.advertising[idx];
8657         }
8658 }
8659
8660 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8661 {
8662         struct hclge_vport *vport = hclge_get_vport(handle);
8663         struct hclge_dev *hdev = vport->back;
8664
8665         return hclge_config_gro(hdev, enable);
8666 }
8667
8668 static const struct hnae3_ae_ops hclge_ops = {
8669         .init_ae_dev = hclge_init_ae_dev,
8670         .uninit_ae_dev = hclge_uninit_ae_dev,
8671         .flr_prepare = hclge_flr_prepare,
8672         .flr_done = hclge_flr_done,
8673         .init_client_instance = hclge_init_client_instance,
8674         .uninit_client_instance = hclge_uninit_client_instance,
8675         .map_ring_to_vector = hclge_map_ring_to_vector,
8676         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8677         .get_vector = hclge_get_vector,
8678         .put_vector = hclge_put_vector,
8679         .set_promisc_mode = hclge_set_promisc_mode,
8680         .set_loopback = hclge_set_loopback,
8681         .start = hclge_ae_start,
8682         .stop = hclge_ae_stop,
8683         .client_start = hclge_client_start,
8684         .client_stop = hclge_client_stop,
8685         .get_status = hclge_get_status,
8686         .get_ksettings_an_result = hclge_get_ksettings_an_result,
8687         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8688         .get_media_type = hclge_get_media_type,
8689         .get_rss_key_size = hclge_get_rss_key_size,
8690         .get_rss_indir_size = hclge_get_rss_indir_size,
8691         .get_rss = hclge_get_rss,
8692         .set_rss = hclge_set_rss,
8693         .set_rss_tuple = hclge_set_rss_tuple,
8694         .get_rss_tuple = hclge_get_rss_tuple,
8695         .get_tc_size = hclge_get_tc_size,
8696         .get_mac_addr = hclge_get_mac_addr,
8697         .set_mac_addr = hclge_set_mac_addr,
8698         .do_ioctl = hclge_do_ioctl,
8699         .add_uc_addr = hclge_add_uc_addr,
8700         .rm_uc_addr = hclge_rm_uc_addr,
8701         .add_mc_addr = hclge_add_mc_addr,
8702         .rm_mc_addr = hclge_rm_mc_addr,
8703         .set_autoneg = hclge_set_autoneg,
8704         .get_autoneg = hclge_get_autoneg,
8705         .get_pauseparam = hclge_get_pauseparam,
8706         .set_pauseparam = hclge_set_pauseparam,
8707         .set_mtu = hclge_set_mtu,
8708         .reset_queue = hclge_reset_tqp,
8709         .get_stats = hclge_get_stats,
8710         .get_mac_pause_stats = hclge_get_mac_pause_stat,
8711         .update_stats = hclge_update_stats,
8712         .get_strings = hclge_get_strings,
8713         .get_sset_count = hclge_get_sset_count,
8714         .get_fw_version = hclge_get_fw_version,
8715         .get_mdix_mode = hclge_get_mdix_mode,
8716         .enable_vlan_filter = hclge_enable_vlan_filter,
8717         .set_vlan_filter = hclge_set_vlan_filter,
8718         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8719         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8720         .reset_event = hclge_reset_event,
8721         .set_default_reset_request = hclge_set_def_reset_request,
8722         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8723         .set_channels = hclge_set_channels,
8724         .get_channels = hclge_get_channels,
8725         .get_regs_len = hclge_get_regs_len,
8726         .get_regs = hclge_get_regs,
8727         .set_led_id = hclge_set_led_id,
8728         .get_link_mode = hclge_get_link_mode,
8729         .add_fd_entry = hclge_add_fd_entry,
8730         .del_fd_entry = hclge_del_fd_entry,
8731         .del_all_fd_entries = hclge_del_all_fd_entries,
8732         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8733         .get_fd_rule_info = hclge_get_fd_rule_info,
8734         .get_fd_all_rules = hclge_get_all_rules,
8735         .restore_fd_rules = hclge_restore_fd_entries,
8736         .enable_fd = hclge_enable_fd,
8737         .dbg_run_cmd = hclge_dbg_run_cmd,
8738         .handle_hw_ras_error = hclge_handle_hw_ras_error,
8739         .get_hw_reset_stat = hclge_get_hw_reset_stat,
8740         .ae_dev_resetting = hclge_ae_dev_resetting,
8741         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8742         .set_gro_en = hclge_gro_en,
8743         .get_global_queue_id = hclge_covert_handle_qid_global,
8744         .set_timer_task = hclge_set_timer_task,
8745         .mac_connect_phy = hclge_mac_connect_phy,
8746         .mac_disconnect_phy = hclge_mac_disconnect_phy,
8747 };
8748
8749 static struct hnae3_ae_algo ae_algo = {
8750         .ops = &hclge_ops,
8751         .pdev_id_table = ae_algo_pci_tbl,
8752 };
8753
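     /* Register this algo with the HNAE3 framework; the framework matches it
      * against ae_algo_pci_tbl when the hns3 PCI driver probes a device and
      * then invokes the ops above, starting with init_ae_dev.
      */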
8754 static int hclge_init(void)
8755 {
8756         pr_info("%s is initializing\n", HCLGE_NAME);
8757
8758         hnae3_register_ae_algo(&ae_algo);
8759
8760         return 0;
8761 }
8762
8763 static void hclge_exit(void)
8764 {
8765         hnae3_unregister_ae_algo(&ae_algo);
8766 }
8767 module_init(hclge_init);
8768 module_exit(hclge_exit);
8769
8770 MODULE_LICENSE("GPL");
8771 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8772 MODULE_DESCRIPTION("HCLGE Driver");
8773 MODULE_VERSION(HCLGE_MOD_VERSION);