net: hns3: add autoneg and change speed support for fibre port
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38
39 static struct hnae3_ae_algo ae_algo;
40
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49         /* required last entry */
50         {0, }
51 };
52
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56                                          HCLGE_CMDQ_TX_ADDR_H_REG,
57                                          HCLGE_CMDQ_TX_DEPTH_REG,
58                                          HCLGE_CMDQ_TX_TAIL_REG,
59                                          HCLGE_CMDQ_TX_HEAD_REG,
60                                          HCLGE_CMDQ_RX_ADDR_L_REG,
61                                          HCLGE_CMDQ_RX_ADDR_H_REG,
62                                          HCLGE_CMDQ_RX_DEPTH_REG,
63                                          HCLGE_CMDQ_RX_TAIL_REG,
64                                          HCLGE_CMDQ_RX_HEAD_REG,
65                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
66                                          HCLGE_CMDQ_INTR_STS_REG,
67                                          HCLGE_CMDQ_INTR_EN_REG,
68                                          HCLGE_CMDQ_INTR_GEN_REG};
69
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71                                            HCLGE_VECTOR0_OTER_EN_REG,
72                                            HCLGE_MISC_RESET_STS_REG,
73                                            HCLGE_MISC_VECTOR_INT_STS,
74                                            HCLGE_GLOBAL_RESET_REG,
75                                            HCLGE_FUN_RST_ING,
76                                            HCLGE_GRO_EN_REG};
77
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79                                          HCLGE_RING_RX_ADDR_H_REG,
80                                          HCLGE_RING_RX_BD_NUM_REG,
81                                          HCLGE_RING_RX_BD_LENGTH_REG,
82                                          HCLGE_RING_RX_MERGE_EN_REG,
83                                          HCLGE_RING_RX_TAIL_REG,
84                                          HCLGE_RING_RX_HEAD_REG,
85                                          HCLGE_RING_RX_FBD_NUM_REG,
86                                          HCLGE_RING_RX_OFFSET_REG,
87                                          HCLGE_RING_RX_FBD_OFFSET_REG,
88                                          HCLGE_RING_RX_STASH_REG,
89                                          HCLGE_RING_RX_BD_ERR_REG,
90                                          HCLGE_RING_TX_ADDR_L_REG,
91                                          HCLGE_RING_TX_ADDR_H_REG,
92                                          HCLGE_RING_TX_BD_NUM_REG,
93                                          HCLGE_RING_TX_PRIORITY_REG,
94                                          HCLGE_RING_TX_TC_REG,
95                                          HCLGE_RING_TX_MERGE_EN_REG,
96                                          HCLGE_RING_TX_TAIL_REG,
97                                          HCLGE_RING_TX_HEAD_REG,
98                                          HCLGE_RING_TX_FBD_NUM_REG,
99                                          HCLGE_RING_TX_OFFSET_REG,
100                                          HCLGE_RING_TX_EBD_NUM_REG,
101                                          HCLGE_RING_TX_EBD_OFFSET_REG,
102                                          HCLGE_RING_TX_BD_ERR_REG,
103                                          HCLGE_RING_EN_REG};
104
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106                                              HCLGE_TQP_INTR_GL0_REG,
107                                              HCLGE_TQP_INTR_GL1_REG,
108                                              HCLGE_TQP_INTR_GL2_REG,
109                                              HCLGE_TQP_INTR_RL_REG};
110
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112         "App    Loopback test",
113         "Serdes serial Loopback test",
114         "Serdes parallel Loopback test",
115         "Phy    Loopback test"
116 };
117
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119         {"mac_tx_mac_pause_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121         {"mac_rx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123         {"mac_tx_control_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125         {"mac_rx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127         {"mac_tx_pfc_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129         {"mac_tx_pfc_pri0_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131         {"mac_tx_pfc_pri1_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133         {"mac_tx_pfc_pri2_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135         {"mac_tx_pfc_pri3_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137         {"mac_tx_pfc_pri4_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139         {"mac_tx_pfc_pri5_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141         {"mac_tx_pfc_pri6_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143         {"mac_tx_pfc_pri7_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145         {"mac_rx_pfc_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147         {"mac_rx_pfc_pri0_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149         {"mac_rx_pfc_pri1_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151         {"mac_rx_pfc_pri2_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153         {"mac_rx_pfc_pri3_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155         {"mac_rx_pfc_pri4_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157         {"mac_rx_pfc_pri5_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159         {"mac_rx_pfc_pri6_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161         {"mac_rx_pfc_pri7_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163         {"mac_tx_total_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165         {"mac_tx_total_oct_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167         {"mac_tx_good_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169         {"mac_tx_bad_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171         {"mac_tx_good_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173         {"mac_tx_bad_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175         {"mac_tx_uni_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177         {"mac_tx_multi_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179         {"mac_tx_broad_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181         {"mac_tx_undersize_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183         {"mac_tx_oversize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185         {"mac_tx_64_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187         {"mac_tx_65_127_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189         {"mac_tx_128_255_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191         {"mac_tx_256_511_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193         {"mac_tx_512_1023_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195         {"mac_tx_1024_1518_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197         {"mac_tx_1519_2047_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199         {"mac_tx_2048_4095_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201         {"mac_tx_4096_8191_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203         {"mac_tx_8192_9216_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205         {"mac_tx_9217_12287_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207         {"mac_tx_12288_16383_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209         {"mac_tx_1519_max_good_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211         {"mac_tx_1519_max_bad_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213         {"mac_rx_total_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215         {"mac_rx_total_oct_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217         {"mac_rx_good_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219         {"mac_rx_bad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221         {"mac_rx_good_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223         {"mac_rx_bad_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225         {"mac_rx_uni_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227         {"mac_rx_multi_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229         {"mac_rx_broad_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231         {"mac_rx_undersize_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233         {"mac_rx_oversize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235         {"mac_rx_64_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237         {"mac_rx_65_127_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239         {"mac_rx_128_255_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241         {"mac_rx_256_511_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243         {"mac_rx_512_1023_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245         {"mac_rx_1024_1518_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247         {"mac_rx_1519_2047_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249         {"mac_rx_2048_4095_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251         {"mac_rx_4096_8191_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253         {"mac_rx_8192_9216_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255         {"mac_rx_9217_12287_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257         {"mac_rx_12288_16383_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259         {"mac_rx_1519_max_good_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261         {"mac_rx_1519_max_bad_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263
264         {"mac_tx_fragment_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266         {"mac_tx_undermin_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268         {"mac_tx_jabber_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270         {"mac_tx_err_all_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272         {"mac_tx_from_app_good_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274         {"mac_tx_from_app_bad_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276         {"mac_rx_fragment_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278         {"mac_rx_undermin_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280         {"mac_rx_jabber_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282         {"mac_rx_fcs_err_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284         {"mac_rx_send_app_good_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286         {"mac_rx_send_app_bad_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291         {
292                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296                 .i_port_bitmap = 0x1,
297         },
298 };
299
300 static const u8 hclge_hash_key[] = {
301         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311
312         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314         __le64 *desc_data;
315         int i, k, n;
316         int ret;
317
318         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320         if (ret) {
321                 dev_err(&hdev->pdev->dev,
322                         "Get MAC pkt stats fail, status = %d.\n", ret);
323
324                 return ret;
325         }
326
327         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328                 /* for special opcode 0032, only the first desc has the head */
329                 if (unlikely(i == 0)) {
330                         desc_data = (__le64 *)(&desc[i].data[0]);
331                         n = HCLGE_RD_FIRST_STATS_NUM;
332                 } else {
333                         desc_data = (__le64 *)(&desc[i]);
334                         n = HCLGE_RD_OTHER_STATS_NUM;
335                 }
336
337                 for (k = 0; k < n; k++) {
338                         *data += le64_to_cpu(*desc_data);
339                         data++;
340                         desc_data++;
341                 }
342         }
343
344         return 0;
345 }
346
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350         struct hclge_desc *desc;
351         __le64 *desc_data;
352         u16 i, k, n;
353         int ret;
354
355         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356         if (!desc)
357                 return -ENOMEM;
358         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360         if (ret) {
361                 kfree(desc);
362                 return ret;
363         }
364
365         for (i = 0; i < desc_num; i++) {
366                 /* for special opcode 0034, only the first desc has the head */
367                 if (i == 0) {
368                         desc_data = (__le64 *)(&desc[i].data[0]);
369                         n = HCLGE_RD_FIRST_STATS_NUM;
370                 } else {
371                         desc_data = (__le64 *)(&desc[i]);
372                         n = HCLGE_RD_OTHER_STATS_NUM;
373                 }
374
375                 for (k = 0; k < n; k++) {
376                         *data += le64_to_cpu(*desc_data);
377                         data++;
378                         desc_data++;
379                 }
380         }
381
382         kfree(desc);
383
384         return 0;
385 }
386
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389         struct hclge_desc desc;
390         __le32 *desc_data;
391         u32 reg_num;
392         int ret;
393
394         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396         if (ret)
397                 return ret;
398
399         desc_data = (__le32 *)(&desc.data[0]);
400         reg_num = le32_to_cpu(*desc_data);
401
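        /* assumption based on the arithmetic below: the first desc covers
         * the first 3 statistics registers and each further desc covers 4,
         * so the remainder is rounded up to a whole desc
         */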
402         *desc_num = 1 + ((reg_num - 3) >> 2) +
403                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404
405         return 0;
406 }
407
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410         u32 desc_num;
411         int ret;
412
413         ret = hclge_mac_query_reg_num(hdev, &desc_num);
414
415         /* The firmware supports the new statistics acquisition method */
416         if (!ret)
417                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418         else if (ret == -EOPNOTSUPP)
419                 ret = hclge_mac_update_stats_defective(hdev);
420         else
421                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422
423         return ret;
424 }
425
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429         struct hclge_vport *vport = hclge_get_vport(handle);
430         struct hclge_dev *hdev = vport->back;
431         struct hnae3_queue *queue;
432         struct hclge_desc desc[1];
433         struct hclge_tqp *tqp;
434         int ret, i;
435
436         for (i = 0; i < kinfo->num_tqps; i++) {
437                 queue = handle->kinfo.tqp[i];
438                 tqp = container_of(queue, struct hclge_tqp, q);
439                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
440                 hclge_cmd_setup_basic_desc(&desc[0],
441                                            HCLGE_OPC_QUERY_RX_STATUS,
442                                            true);
443
444                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
446                 if (ret) {
447                         dev_err(&hdev->pdev->dev,
448                                 "Query tqp stat fail, status = %d, queue = %d\n",
449                                 ret, i);
450                         return ret;
451                 }
452                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453                         le32_to_cpu(desc[0].data[1]);
454         }
455
456         for (i = 0; i < kinfo->num_tqps; i++) {
457                 queue = handle->kinfo.tqp[i];
458                 tqp = container_of(queue, struct hclge_tqp, q);
459                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
460                 hclge_cmd_setup_basic_desc(&desc[0],
461                                            HCLGE_OPC_QUERY_TX_STATUS,
462                                            true);
463
464                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
466                 if (ret) {
467                         dev_err(&hdev->pdev->dev,
468                                 "Query tqp stat fail, status = %d, queue = %d\n",
469                                 ret, i);
470                         return ret;
471                 }
472                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473                         le32_to_cpu(desc[0].data[1]);
474         }
475
476         return 0;
477 }
478
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482         struct hclge_tqp *tqp;
483         u64 *buff = data;
484         int i;
485
486         for (i = 0; i < kinfo->num_tqps; i++) {
487                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489         }
490
491         for (i = 0; i < kinfo->num_tqps; i++) {
492                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494         }
495
496         return buff;
497 }
498
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502
503         return kinfo->num_tqps * (2);
504 }
505
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509         u8 *buff = data;
510         int i = 0;
511
512         for (i = 0; i < kinfo->num_tqps; i++) {
513                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514                         struct hclge_tqp, q);
515                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516                          tqp->index);
517                 buff = buff + ETH_GSTRING_LEN;
518         }
519
520         for (i = 0; i < kinfo->num_tqps; i++) {
521                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522                         struct hclge_tqp, q);
523                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524                          tqp->index);
525                 buff = buff + ETH_GSTRING_LEN;
526         }
527
528         return buff;
529 }
530
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532                                  const struct hclge_comm_stats_str strs[],
533                                  int size, u64 *data)
534 {
535         u64 *buf = data;
536         u32 i;
537
538         for (i = 0; i < size; i++)
539                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540
541         return buf + size;
542 }
543
544 static u8 *hclge_comm_get_strings(u32 stringset,
545                                   const struct hclge_comm_stats_str strs[],
546                                   int size, u8 *data)
547 {
548         char *buff = (char *)data;
549         u32 i;
550
551         if (stringset != ETH_SS_STATS)
552                 return buff;
553
554         for (i = 0; i < size; i++) {
555                 snprintf(buff, ETH_GSTRING_LEN,
556                          strs[i].desc);
557                 buff = buff + ETH_GSTRING_LEN;
558         }
559
560         return (u8 *)buff;
561 }
562
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565         struct hnae3_handle *handle;
566         int status;
567
568         handle = &hdev->vport[0].nic;
569         if (handle->client) {
570                 status = hclge_tqps_update_stats(handle);
571                 if (status) {
572                         dev_err(&hdev->pdev->dev,
573                                 "Update TQPS stats fail, status = %d.\n",
574                                 status);
575                 }
576         }
577
578         status = hclge_mac_update_stats(hdev);
579         if (status)
580                 dev_err(&hdev->pdev->dev,
581                         "Update MAC stats fail, status = %d.\n", status);
582 }
583
584 static void hclge_update_stats(struct hnae3_handle *handle,
585                                struct net_device_stats *net_stats)
586 {
587         struct hclge_vport *vport = hclge_get_vport(handle);
588         struct hclge_dev *hdev = vport->back;
589         int status;
590
591         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592                 return;
593
594         status = hclge_mac_update_stats(hdev);
595         if (status)
596                 dev_err(&hdev->pdev->dev,
597                         "Update MAC stats fail, status = %d.\n",
598                         status);
599
600         status = hclge_tqps_update_stats(handle);
601         if (status)
602                 dev_err(&hdev->pdev->dev,
603                         "Update TQPS stats fail, status = %d.\n",
604                         status);
605
606         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612                 HNAE3_SUPPORT_PHY_LOOPBACK |\
613                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615
616         struct hclge_vport *vport = hclge_get_vport(handle);
617         struct hclge_dev *hdev = vport->back;
618         int count = 0;
619
620         /* Loopback test support rules:
621          * mac: only GE mode is supported
622          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
623          * phy: only supported when a phy device exists on the board
624          */
625         if (stringset == ETH_SS_TEST) {
626                 /* clear loopback bit flags at first */
627                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628                 if (hdev->pdev->revision >= 0x21 ||
629                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632                         count += 1;
633                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634                 }
635
636                 count += 2;
637                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639         } else if (stringset == ETH_SS_STATS) {
640                 count = ARRAY_SIZE(g_mac_stats_string) +
641                         hclge_tqps_get_sset_count(handle, stringset);
642         }
643
644         return count;
645 }
646
647 static void hclge_get_strings(struct hnae3_handle *handle,
648                               u32 stringset,
649                               u8 *data)
650 {
651         u8 *p = (char *)data;
652         int size;
653
654         if (stringset == ETH_SS_STATS) {
655                 size = ARRAY_SIZE(g_mac_stats_string);
656                 p = hclge_comm_get_strings(stringset,
657                                            g_mac_stats_string,
658                                            size,
659                                            p);
660                 p = hclge_tqps_get_strings(handle, p);
661         } else if (stringset == ETH_SS_TEST) {
662                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663                         memcpy(p,
664                                hns3_nic_test_strs[HNAE3_LOOP_APP],
665                                ETH_GSTRING_LEN);
666                         p += ETH_GSTRING_LEN;
667                 }
668                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669                         memcpy(p,
670                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671                                ETH_GSTRING_LEN);
672                         p += ETH_GSTRING_LEN;
673                 }
674                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675                         memcpy(p,
676                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677                                ETH_GSTRING_LEN);
678                         p += ETH_GSTRING_LEN;
679                 }
680                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681                         memcpy(p,
682                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686         }
687 }
688
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691         struct hclge_vport *vport = hclge_get_vport(handle);
692         struct hclge_dev *hdev = vport->back;
693         u64 *p;
694
695         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696                                  g_mac_stats_string,
697                                  ARRAY_SIZE(g_mac_stats_string),
698                                  data);
699         p = hclge_tqps_get_stats(handle, p);
700 }
701
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703                                      u64 *rx_cnt)
704 {
705         struct hclge_vport *vport = hclge_get_vport(handle);
706         struct hclge_dev *hdev = vport->back;
707
708         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710 }
711
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713                                    struct hclge_func_status_cmd *status)
714 {
715         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716                 return -EINVAL;
717
718         /* Set the pf to main pf */
719         if (status->pf_state & HCLGE_PF_STATE_MAIN)
720                 hdev->flag |= HCLGE_FLAG_MAIN;
721         else
722                 hdev->flag &= ~HCLGE_FLAG_MAIN;
723
724         return 0;
725 }
726
727 static int hclge_query_function_status(struct hclge_dev *hdev)
728 {
729         struct hclge_func_status_cmd *req;
730         struct hclge_desc desc;
731         int timeout = 0;
732         int ret;
733
734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735         req = (struct hclge_func_status_cmd *)desc.data;
736
737         do {
738                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739                 if (ret) {
740                         dev_err(&hdev->pdev->dev,
741                                 "query function status failed %d.\n",
742                                 ret);
743
744                         return ret;
745                 }
746
747                 /* Check pf reset is done */
748                 if (req->pf_state)
749                         break;
750                 usleep_range(1000, 2000);
751         } while (timeout++ < 5);
752
753         ret = hclge_parse_func_status(hdev, req);
754
755         return ret;
756 }
757
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
759 {
760         struct hclge_pf_res_cmd *req;
761         struct hclge_desc desc;
762         int ret;
763
764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766         if (ret) {
767                 dev_err(&hdev->pdev->dev,
768                         "query pf resource failed %d.\n", ret);
769                 return ret;
770         }
771
772         req = (struct hclge_pf_res_cmd *)desc.data;
773         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
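        /* the buffer sizes below are reported by firmware in units of
         * (1 << HCLGE_BUF_UNIT_S) bytes, hence the shifts
         */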
774         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775
776         if (req->tx_buf_size)
777                 hdev->tx_buf_size =
778                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779         else
780                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781
782         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783
784         if (req->dv_buf_size)
785                 hdev->dv_buf_size =
786                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787         else
788                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789
790         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791
792         if (hnae3_dev_roce_supported(hdev)) {
793                 hdev->roce_base_msix_offset =
794                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796                 hdev->num_roce_msi =
797                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799
800                 /* PF should have NIC vectors and RoCE vectors;
801                  * NIC vectors are queued before RoCE vectors.
802                  */
803                 hdev->num_msi = hdev->num_roce_msi  +
804                                 hdev->roce_base_msix_offset;
805         } else {
806                 hdev->num_msi =
807                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809         }
810
811         return 0;
812 }
813
814 static int hclge_parse_speed(int speed_cmd, int *speed)
815 {
816         switch (speed_cmd) {
817         case 6:
818                 *speed = HCLGE_MAC_SPEED_10M;
819                 break;
820         case 7:
821                 *speed = HCLGE_MAC_SPEED_100M;
822                 break;
823         case 0:
824                 *speed = HCLGE_MAC_SPEED_1G;
825                 break;
826         case 1:
827                 *speed = HCLGE_MAC_SPEED_10G;
828                 break;
829         case 2:
830                 *speed = HCLGE_MAC_SPEED_25G;
831                 break;
832         case 3:
833                 *speed = HCLGE_MAC_SPEED_40G;
834                 break;
835         case 4:
836                 *speed = HCLGE_MAC_SPEED_50G;
837                 break;
838         case 5:
839                 *speed = HCLGE_MAC_SPEED_100G;
840                 break;
841         default:
842                 return -EINVAL;
843         }
844
845         return 0;
846 }
847
848 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
849 {
850         struct hclge_vport *vport = hclge_get_vport(handle);
851         struct hclge_dev *hdev = vport->back;
852         u32 speed_ability = hdev->hw.mac.speed_ability;
853         u32 speed_bit = 0;
854
855         switch (speed) {
856         case HCLGE_MAC_SPEED_10M:
857                 speed_bit = HCLGE_SUPPORT_10M_BIT;
858                 break;
859         case HCLGE_MAC_SPEED_100M:
860                 speed_bit = HCLGE_SUPPORT_100M_BIT;
861                 break;
862         case HCLGE_MAC_SPEED_1G:
863                 speed_bit = HCLGE_SUPPORT_1G_BIT;
864                 break;
865         case HCLGE_MAC_SPEED_10G:
866                 speed_bit = HCLGE_SUPPORT_10G_BIT;
867                 break;
868         case HCLGE_MAC_SPEED_25G:
869                 speed_bit = HCLGE_SUPPORT_25G_BIT;
870                 break;
871         case HCLGE_MAC_SPEED_40G:
872                 speed_bit = HCLGE_SUPPORT_40G_BIT;
873                 break;
874         case HCLGE_MAC_SPEED_50G:
875                 speed_bit = HCLGE_SUPPORT_50G_BIT;
876                 break;
877         case HCLGE_MAC_SPEED_100G:
878                 speed_bit = HCLGE_SUPPORT_100G_BIT;
879                 break;
880         default:
881                 return -EINVAL;
882         }
883
884         if (speed_bit & speed_ability)
885                 return 0;
886
887         return -EINVAL;
888 }
889
890 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
891 {
892         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
893                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
894                                  mac->supported);
895         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
896                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
897                                  mac->supported);
898         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
899                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
900                                  mac->supported);
901         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
902                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
903                                  mac->supported);
904         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
905                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
906                                  mac->supported);
907 }
908
909 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
910 {
911         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
912                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
913                                  mac->supported);
914         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
915                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
916                                  mac->supported);
917         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
918                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
919                                  mac->supported);
920         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
921                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
922                                  mac->supported);
923         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
924                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
925                                  mac->supported);
926 }
927
928 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
929 {
930         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
931                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
932                                  mac->supported);
933         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
934                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
935                                  mac->supported);
936         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
937                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
938                                  mac->supported);
939         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
940                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
941                                  mac->supported);
942         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
943                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
944                                  mac->supported);
945 }
946
947 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
948 {
949         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
950                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
951                                  mac->supported);
952         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
953                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
954                                  mac->supported);
955         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
956                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
957                                  mac->supported);
958         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
959                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
960                                  mac->supported);
961         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
962                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
963                                  mac->supported);
964         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
965                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
966                                  mac->supported);
967 }
968
969 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
970                                         u8 speed_ability)
971 {
972         struct hclge_mac *mac = &hdev->hw.mac;
973
974         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
975                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
976                                  mac->supported);
977
978         hclge_convert_setting_sr(mac, speed_ability);
979         hclge_convert_setting_lr(mac, speed_ability);
980         hclge_convert_setting_cr(mac, speed_ability);
981
982         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
983         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
984 }
985
986 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
987                                             u8 speed_ability)
988 {
989         struct hclge_mac *mac = &hdev->hw.mac;
990
991         hclge_convert_setting_kr(mac, speed_ability);
992         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
993         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
994 }
995
996 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
997                                          u8 speed_ability)
998 {
999         unsigned long *supported = hdev->hw.mac.supported;
1000
1001         /* default to supporting all speeds for a GE port */
1002         if (!speed_ability)
1003                 speed_ability = HCLGE_SUPPORT_GE;
1004
1005         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1007                                  supported);
1008
1009         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1010                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1011                                  supported);
1012                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1013                                  supported);
1014         }
1015
1016         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1019         }
1020
1021         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1022         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1023         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1024 }
1025
1026 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1027 {
1028         u8 media_type = hdev->hw.mac.media_type;
1029
1030         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1031                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1032         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1033                 hclge_parse_copper_link_mode(hdev, speed_ability);
1034         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1035                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1036 }

1037 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1038 {
1039         struct hclge_cfg_param_cmd *req;
1040         u64 mac_addr_tmp_high;
1041         u64 mac_addr_tmp;
1042         int i;
1043
1044         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1045
1046         /* get the configuration */
1047         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1048                                               HCLGE_CFG_VMDQ_M,
1049                                               HCLGE_CFG_VMDQ_S);
1050         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1051                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1052         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1053                                             HCLGE_CFG_TQP_DESC_N_M,
1054                                             HCLGE_CFG_TQP_DESC_N_S);
1055
1056         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1057                                         HCLGE_CFG_PHY_ADDR_M,
1058                                         HCLGE_CFG_PHY_ADDR_S);
1059         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1060                                           HCLGE_CFG_MEDIA_TP_M,
1061                                           HCLGE_CFG_MEDIA_TP_S);
1062         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1063                                           HCLGE_CFG_RX_BUF_LEN_M,
1064                                           HCLGE_CFG_RX_BUF_LEN_S);
1065         /* get mac_address */
1066         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1067         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1068                                             HCLGE_CFG_MAC_ADDR_H_M,
1069                                             HCLGE_CFG_MAC_ADDR_H_S);
1070
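        /* param[2] carries the low 32 bits of the MAC address and the field
         * above carries the remaining high bits; the two shifts below place
         * the high bits just above bit 31 (a net shift of 32)
         */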
1071         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1072
1073         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1074                                              HCLGE_CFG_DEFAULT_SPEED_M,
1075                                              HCLGE_CFG_DEFAULT_SPEED_S);
1076         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1077                                             HCLGE_CFG_RSS_SIZE_M,
1078                                             HCLGE_CFG_RSS_SIZE_S);
1079
1080         for (i = 0; i < ETH_ALEN; i++)
1081                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1084         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1085
1086         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1087                                              HCLGE_CFG_SPEED_ABILITY_M,
1088                                              HCLGE_CFG_SPEED_ABILITY_S);
1089         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1090                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1091                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1092         if (!cfg->umv_space)
1093                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1094 }
1095
1096 /* hclge_get_cfg: query the static parameters from flash
1097  * @hdev: pointer to struct hclge_dev
1098  * @hcfg: the config structure to be filled
1099  */
1100 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1101 {
1102         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1103         struct hclge_cfg_param_cmd *req;
1104         int i, ret;
1105
1106         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1107                 u32 offset = 0;
1108
1109                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1110                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1111                                            true);
1112                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1113                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1114                 /* the read length is sent to hardware in units of 4 bytes */
1115                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1116                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1117                 req->offset = cpu_to_le32(offset);
1118         }
1119
1120         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1121         if (ret) {
1122                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1123                 return ret;
1124         }
1125
1126         hclge_parse_cfg(hcfg, desc);
1127
1128         return 0;
1129 }
1130
1131 static int hclge_get_cap(struct hclge_dev *hdev)
1132 {
1133         int ret;
1134
1135         ret = hclge_query_function_status(hdev);
1136         if (ret) {
1137                 dev_err(&hdev->pdev->dev,
1138                         "query function status error %d.\n", ret);
1139                 return ret;
1140         }
1141
1142         /* get pf resource */
1143         ret = hclge_query_pf_resource(hdev);
1144         if (ret)
1145                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1146
1147         return ret;
1148 }
1149
1150 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1151 {
1152 #define HCLGE_MIN_TX_DESC       64
1153 #define HCLGE_MIN_RX_DESC       64
1154
1155         if (!is_kdump_kernel())
1156                 return;
1157
1158         dev_info(&hdev->pdev->dev,
1159                  "Running kdump kernel. Using minimal resources\n");
1160
1161         /* minimum number of queue pairs equals the number of vports */
1162         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1163         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1164         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1165 }
1166
1167 static int hclge_configure(struct hclge_dev *hdev)
1168 {
1169         struct hclge_cfg cfg;
1170         int ret, i;
1171
1172         ret = hclge_get_cfg(hdev, &cfg);
1173         if (ret) {
1174                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1175                 return ret;
1176         }
1177
1178         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1179         hdev->base_tqp_pid = 0;
1180         hdev->rss_size_max = cfg.rss_size_max;
1181         hdev->rx_buf_len = cfg.rx_buf_len;
1182         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1183         hdev->hw.mac.media_type = cfg.media_type;
1184         hdev->hw.mac.phy_addr = cfg.phy_addr;
1185         hdev->num_tx_desc = cfg.tqp_desc_num;
1186         hdev->num_rx_desc = cfg.tqp_desc_num;
1187         hdev->tm_info.num_pg = 1;
1188         hdev->tc_max = cfg.tc_num;
1189         hdev->tm_info.hw_pfc_map = 0;
1190         hdev->wanted_umv_size = cfg.umv_space;
1191
1192         if (hnae3_dev_fd_supported(hdev))
1193                 hdev->fd_en = true;
1194
1195         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1196         if (ret) {
1197                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1198                 return ret;
1199         }
1200
1201         hclge_parse_link_mode(hdev, cfg.speed_ability);
1202
1203         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1204             (hdev->tc_max < 1)) {
1205                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1206                          hdev->tc_max);
1207                 hdev->tc_max = 1;
1208         }
1209
1210         /* Dev does not support DCB */
1211         if (!hnae3_dev_dcb_supported(hdev)) {
1212                 hdev->tc_max = 1;
1213                 hdev->pfc_max = 0;
1214         } else {
1215                 hdev->pfc_max = hdev->tc_max;
1216         }
1217
1218         hdev->tm_info.num_tc = 1;
1219
1220         /* Non-contiguous TCs are currently not supported */
1221         for (i = 0; i < hdev->tm_info.num_tc; i++)
1222                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1223
1224         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1225
1226         hclge_init_kdump_kernel_config(hdev);
1227
1228         return ret;
1229 }
1230
1231 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1232                             int tso_mss_max)
1233 {
1234         struct hclge_cfg_tso_status_cmd *req;
1235         struct hclge_desc desc;
1236         u16 tso_mss;
1237
1238         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1239
1240         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1241
1242         tso_mss = 0;
1243         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1244                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1245         req->tso_mss_min = cpu_to_le16(tso_mss);
1246
1247         tso_mss = 0;
1248         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1249                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1250         req->tso_mss_max = cpu_to_le16(tso_mss);
1251
1252         return hclge_cmd_send(&hdev->hw, &desc, 1);
1253 }
1254
1255 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1256 {
1257         struct hclge_cfg_gro_status_cmd *req;
1258         struct hclge_desc desc;
1259         int ret;
1260
1261         if (!hnae3_dev_gro_supported(hdev))
1262                 return 0;
1263
1264         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1265         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1266
1267         req->gro_en = cpu_to_le16(en ? 1 : 0);
1268
1269         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1270         if (ret)
1271                 dev_err(&hdev->pdev->dev,
1272                         "GRO hardware config cmd failed, ret = %d\n", ret);
1273
1274         return ret;
1275 }
1276
1277 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1278 {
1279         struct hclge_tqp *tqp;
1280         int i;
1281
1282         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1283                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1284         if (!hdev->htqp)
1285                 return -ENOMEM;
1286
1287         tqp = hdev->htqp;
1288
1289         for (i = 0; i < hdev->num_tqps; i++) {
1290                 tqp->dev = &hdev->pdev->dev;
1291                 tqp->index = i;
1292
1293                 tqp->q.ae_algo = &ae_algo;
1294                 tqp->q.buf_size = hdev->rx_buf_len;
1295                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1296                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1297                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1298                         i * HCLGE_TQP_REG_SIZE;
1299
1300                 tqp++;
1301         }
1302
1303         return 0;
1304 }
1305
1306 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1307                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1308 {
1309         struct hclge_tqp_map_cmd *req;
1310         struct hclge_desc desc;
1311         int ret;
1312
1313         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1314
1315         req = (struct hclge_tqp_map_cmd *)desc.data;
1316         req->tqp_id = cpu_to_le16(tqp_pid);
1317         req->tqp_vf = func_id;
1318         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1319                         1 << HCLGE_TQP_MAP_EN_B;
1320         req->tqp_vid = cpu_to_le16(tqp_vid);
1321
1322         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1323         if (ret)
1324                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1325
1326         return ret;
1327 }
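
/* Editor's note: worked example of the tqp_flag encoding used above, derived
 * purely from the expression in hclge_map_tqps_to_func():
 *   PF mapping (is_pf == true):  tqp_flag = (0 << HCLGE_TQP_MAP_TYPE_B) |
 *                                           (1 << HCLGE_TQP_MAP_EN_B)
 *   VF mapping (is_pf == false): tqp_flag = (1 << HCLGE_TQP_MAP_TYPE_B) |
 *                                           (1 << HCLGE_TQP_MAP_EN_B)
 * i.e. the TYPE bit distinguishes a VF mapping from a PF mapping, and the
 * EN bit is always set when a mapping is established.
 */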
1328
1329 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1330 {
1331         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1332         struct hclge_dev *hdev = vport->back;
1333         int i, alloced;
1334
1335         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1336              alloced < num_tqps; i++) {
1337                 if (!hdev->htqp[i].alloced) {
1338                         hdev->htqp[i].q.handle = &vport->nic;
1339                         hdev->htqp[i].q.tqp_index = alloced;
1340                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1341                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1342                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1343                         hdev->htqp[i].alloced = true;
1344                         alloced++;
1345                 }
1346         }
1347         vport->alloc_tqps = alloced;
1348         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1349                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1350
1351         return 0;
1352 }
1353
1354 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1355                             u16 num_tx_desc, u16 num_rx_desc)
1356
1357 {
1358         struct hnae3_handle *nic = &vport->nic;
1359         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1360         struct hclge_dev *hdev = vport->back;
1361         int ret;
1362
1363         kinfo->num_tx_desc = num_tx_desc;
1364         kinfo->num_rx_desc = num_rx_desc;
1365
1366         kinfo->rx_buf_len = hdev->rx_buf_len;
1367
1368         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1369                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1370         if (!kinfo->tqp)
1371                 return -ENOMEM;
1372
1373         ret = hclge_assign_tqp(vport, num_tqps);
1374         if (ret)
1375                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1376
1377         return ret;
1378 }
1379
1380 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1381                                   struct hclge_vport *vport)
1382 {
1383         struct hnae3_handle *nic = &vport->nic;
1384         struct hnae3_knic_private_info *kinfo;
1385         u16 i;
1386
1387         kinfo = &nic->kinfo;
1388         for (i = 0; i < vport->alloc_tqps; i++) {
1389                 struct hclge_tqp *q =
1390                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1391                 bool is_pf;
1392                 int ret;
1393
1394                 is_pf = !(vport->vport_id);
1395                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1396                                              i, is_pf);
1397                 if (ret)
1398                         return ret;
1399         }
1400
1401         return 0;
1402 }
1403
1404 static int hclge_map_tqp(struct hclge_dev *hdev)
1405 {
1406         struct hclge_vport *vport = hdev->vport;
1407         u16 i, num_vport;
1408
1409         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1410         for (i = 0; i < num_vport; i++) {
1411                 int ret;
1412
1413                 ret = hclge_map_tqp_to_vport(hdev, vport);
1414                 if (ret)
1415                         return ret;
1416
1417                 vport++;
1418         }
1419
1420         return 0;
1421 }
1422
1423 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1424 {
1425         /* this would be initialized later */
1426 }
1427
1428 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1429 {
1430         struct hnae3_handle *nic = &vport->nic;
1431         struct hclge_dev *hdev = vport->back;
1432         int ret;
1433
1434         nic->pdev = hdev->pdev;
1435         nic->ae_algo = &ae_algo;
1436         nic->numa_node_mask = hdev->numa_node_mask;
1437
1438         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1439                 ret = hclge_knic_setup(vport, num_tqps,
1440                                        hdev->num_tx_desc, hdev->num_rx_desc);
1441
1442                 if (ret) {
1443                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1444                                 ret);
1445                         return ret;
1446                 }
1447         } else {
1448                 hclge_unic_setup(vport, num_tqps);
1449         }
1450
1451         return 0;
1452 }
1453
1454 static int hclge_alloc_vport(struct hclge_dev *hdev)
1455 {
1456         struct pci_dev *pdev = hdev->pdev;
1457         struct hclge_vport *vport;
1458         u32 tqp_main_vport;
1459         u32 tqp_per_vport;
1460         int num_vport, i;
1461         int ret;
1462
1463         /* We need to alloc a vport for the main NIC of the PF */
1464         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1465
1466         if (hdev->num_tqps < num_vport) {
1467                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1468                         hdev->num_tqps, num_vport);
1469                 return -EINVAL;
1470         }
1471
1472         /* Alloc the same number of TQPs for every vport */
1473         tqp_per_vport = hdev->num_tqps / num_vport;
1474         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1475
1476         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1477                              GFP_KERNEL);
1478         if (!vport)
1479                 return -ENOMEM;
1480
1481         hdev->vport = vport;
1482         hdev->num_alloc_vport = num_vport;
1483
1484         if (IS_ENABLED(CONFIG_PCI_IOV))
1485                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1486
1487         for (i = 0; i < num_vport; i++) {
1488                 vport->back = hdev;
1489                 vport->vport_id = i;
1490                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1491                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1492                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1493                 INIT_LIST_HEAD(&vport->vlan_list);
1494                 INIT_LIST_HEAD(&vport->uc_mac_list);
1495                 INIT_LIST_HEAD(&vport->mc_mac_list);
1496
1497                 if (i == 0)
1498                         ret = hclge_vport_setup(vport, tqp_main_vport);
1499                 else
1500                         ret = hclge_vport_setup(vport, tqp_per_vport);
1501                 if (ret) {
1502                         dev_err(&pdev->dev,
1503                                 "vport setup failed for vport %d, %d\n",
1504                                 i, ret);
1505                         return ret;
1506                 }
1507
1508                 vport++;
1509         }
1510
1511         return 0;
1512 }
1513
1514 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1515                                     struct hclge_pkt_buf_alloc *buf_alloc)
1516 {
1517 /* TX buffer size is in units of 128 bytes */
1518 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1519 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1520         struct hclge_tx_buff_alloc_cmd *req;
1521         struct hclge_desc desc;
1522         int ret;
1523         u8 i;
1524
1525         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1526
1527         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1528         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1529                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1530
1531                 req->tx_pkt_buff[i] =
1532                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1533                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1534         }
1535
1536         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1537         if (ret)
1538                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1539                         ret);
1540
1541         return ret;
1542 }
1543
1544 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1545                                  struct hclge_pkt_buf_alloc *buf_alloc)
1546 {
1547         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1548
1549         if (ret)
1550                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1551
1552         return ret;
1553 }
1554
1555 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1556 {
1557         int i, cnt = 0;
1558
1559         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1560                 if (hdev->hw_tc_map & BIT(i))
1561                         cnt++;
1562         return cnt;
1563 }
1564
1565 /* Get the number of PFC-enabled TCs that have a private buffer */
1566 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1567                                   struct hclge_pkt_buf_alloc *buf_alloc)
1568 {
1569         struct hclge_priv_buf *priv;
1570         int i, cnt = 0;
1571
1572         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1573                 priv = &buf_alloc->priv_buf[i];
1574                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1575                     priv->enable)
1576                         cnt++;
1577         }
1578
1579         return cnt;
1580 }
1581
1582 /* Get the number of PFC-disabled TCs that have a private buffer */
1583 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1584                                      struct hclge_pkt_buf_alloc *buf_alloc)
1585 {
1586         struct hclge_priv_buf *priv;
1587         int i, cnt = 0;
1588
1589         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1590                 priv = &buf_alloc->priv_buf[i];
1591                 if (hdev->hw_tc_map & BIT(i) &&
1592                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1593                     priv->enable)
1594                         cnt++;
1595         }
1596
1597         return cnt;
1598 }
1599
1600 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1601 {
1602         struct hclge_priv_buf *priv;
1603         u32 rx_priv = 0;
1604         int i;
1605
1606         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1607                 priv = &buf_alloc->priv_buf[i];
1608                 if (priv->enable)
1609                         rx_priv += priv->buf_size;
1610         }
1611         return rx_priv;
1612 }
1613
1614 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1615 {
1616         u32 i, total_tx_size = 0;
1617
1618         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1619                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1620
1621         return total_tx_size;
1622 }
1623
1624 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1625                                 struct hclge_pkt_buf_alloc *buf_alloc,
1626                                 u32 rx_all)
1627 {
1628         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1629         u32 tc_num = hclge_get_tc_num(hdev);
1630         u32 shared_buf, aligned_mps;
1631         u32 rx_priv;
1632         int i;
1633
1634         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1635
1636         if (hnae3_dev_dcb_supported(hdev))
1637                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1638         else
1639                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1640                                         + hdev->dv_buf_size;
1641
1642         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1643         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1644                              HCLGE_BUF_SIZE_UNIT);
1645
1646         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1647         if (rx_all < rx_priv + shared_std)
1648                 return false;
1649
1650         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1651         buf_alloc->s_buf.buf_size = shared_buf;
1652         if (hnae3_dev_dcb_supported(hdev)) {
1653                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1654                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1655                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1656         } else {
1657                 buf_alloc->s_buf.self.high = aligned_mps +
1658                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1659                 buf_alloc->s_buf.self.low = aligned_mps;
1660         }
1661
1662         if (hnae3_dev_dcb_supported(hdev)) {
1663                 if (tc_num)
1664                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1665                 else
1666                         hi_thrd = shared_buf - hdev->dv_buf_size;
1667
1668                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1669                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1670                 lo_thrd = hi_thrd - aligned_mps / 2;
1671         } else {
1672                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1673                 lo_thrd = aligned_mps;
1674         }
1675
1676         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1677                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1678                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1679         }
1680
1681         return true;
1682 }
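
/* Editor's note: a worked example of the shared-buffer sizing above, using
 * assumed values (HCLGE_BUF_SIZE_UNIT is 256 in this file):
 *   mps = 1500                -> aligned_mps = roundup(1500, 256) = 1536
 *   tc_num = 4, DCB supported -> shared_buf_tc  = 4 * 1536 + 1536 = 7680
 *                                shared_buf_min = 2 * 1536 + dv_buf_size
 * With dv_buf_size at most 4608, shared_std = roundup(7680, 256) = 7680,
 * so rx_all must cover at least rx_priv + 7680 bytes for this layout to be
 * accepted.
 */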
1683
1684 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1685                                 struct hclge_pkt_buf_alloc *buf_alloc)
1686 {
1687         u32 i, total_size;
1688
1689         total_size = hdev->pkt_buf_size;
1690
1691         /* alloc tx buffer for all enabled TCs */
1692         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1693                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1694
1695                 if (hdev->hw_tc_map & BIT(i)) {
1696                         if (total_size < hdev->tx_buf_size)
1697                                 return -ENOMEM;
1698
1699                         priv->tx_buf_size = hdev->tx_buf_size;
1700                 } else {
1701                         priv->tx_buf_size = 0;
1702                 }
1703
1704                 total_size -= priv->tx_buf_size;
1705         }
1706
1707         return 0;
1708 }
1709
1710 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1711                                   struct hclge_pkt_buf_alloc *buf_alloc)
1712 {
1713         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1714         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1715         int i;
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1719
1720                 priv->enable = 0;
1721                 priv->wl.low = 0;
1722                 priv->wl.high = 0;
1723                 priv->buf_size = 0;
1724
1725                 if (!(hdev->hw_tc_map & BIT(i)))
1726                         continue;
1727
1728                 priv->enable = 1;
1729
1730                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1731                         priv->wl.low = max ? aligned_mps : 256;
1732                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1733                                                 HCLGE_BUF_SIZE_UNIT);
1734                 } else {
1735                         priv->wl.low = 0;
1736                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1737                 }
1738
1739                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1740         }
1741
1742         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1743 }
1744
1745 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1746                                           struct hclge_pkt_buf_alloc *buf_alloc)
1747 {
1748         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1749         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1750         int i;
1751
1752         /* let the last TC be cleared first */
1753         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1754                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1755
1756                 if (hdev->hw_tc_map & BIT(i) &&
1757                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1758                         /* Clear the no pfc TC private buffer */
1759                         priv->wl.low = 0;
1760                         priv->wl.high = 0;
1761                         priv->buf_size = 0;
1762                         priv->enable = 0;
1763                         no_pfc_priv_num--;
1764                 }
1765
1766                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1767                     no_pfc_priv_num == 0)
1768                         break;
1769         }
1770
1771         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1772 }
1773
1774 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1775                                         struct hclge_pkt_buf_alloc *buf_alloc)
1776 {
1777         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1778         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1779         int i;
1780
1781         /* let the last TC be cleared first */
1782         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1783                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1784
1785                 if (hdev->hw_tc_map & BIT(i) &&
1786                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1787                         /* Reduce the number of pfc TC with private buffer */
1788                         priv->wl.low = 0;
1789                         priv->enable = 0;
1790                         priv->wl.high = 0;
1791                         priv->buf_size = 0;
1792                         pfc_priv_num--;
1793                 }
1794
1795                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1796                     pfc_priv_num == 0)
1797                         break;
1798         }
1799
1800         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1801 }
1802
1803 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1804  * @hdev: pointer to struct hclge_dev
1805  * @buf_alloc: pointer to buffer calculation data
1806  * @return: 0: calculate sucessful, negative: fail
1807  */
1808 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1809                                 struct hclge_pkt_buf_alloc *buf_alloc)
1810 {
1811         /* When DCB is not supported, rx private buffer is not allocated. */
1812         if (!hnae3_dev_dcb_supported(hdev)) {
1813                 u32 rx_all = hdev->pkt_buf_size;
1814
1815                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1816                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1817                         return -ENOMEM;
1818
1819                 return 0;
1820         }
1821
1822         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1823                 return 0;
1824
1825         /* try to decrease the buffer size */
1826         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1827                 return 0;
1828
1829         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1830                 return 0;
1831
1832         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1833                 return 0;
1834
1835         return -ENOMEM;
1836 }
1837
1838 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1839                                    struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841         struct hclge_rx_priv_buff_cmd *req;
1842         struct hclge_desc desc;
1843         int ret;
1844         int i;
1845
1846         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1847         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1848
1849         /* Alloc a private buffer for each TC */
1850         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1851                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1852
1853                 req->buf_num[i] =
1854                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1855                 req->buf_num[i] |=
1856                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1857         }
1858
1859         req->shared_buf =
1860                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1861                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1862
1863         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1864         if (ret)
1865                 dev_err(&hdev->pdev->dev,
1866                         "rx private buffer alloc cmd failed %d\n", ret);
1867
1868         return ret;
1869 }
1870
1871 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1872                                    struct hclge_pkt_buf_alloc *buf_alloc)
1873 {
1874         struct hclge_rx_priv_wl_buf *req;
1875         struct hclge_priv_buf *priv;
1876         struct hclge_desc desc[2];
1877         int i, j;
1878         int ret;
1879
1880         for (i = 0; i < 2; i++) {
1881                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1882                                            false);
1883                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1884
1885                 /* The first descriptor sets the NEXT bit to 1 */
1886                 if (i == 0)
1887                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1888                 else
1889                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1890
1891                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1892                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1893
1894                         priv = &buf_alloc->priv_buf[idx];
1895                         req->tc_wl[j].high =
1896                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1897                         req->tc_wl[j].high |=
1898                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1899                         req->tc_wl[j].low =
1900                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1901                         req->tc_wl[j].low |=
1902                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1903                 }
1904         }
1905
1906         /* Send 2 descriptors at one time */
1907         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1908         if (ret)
1909                 dev_err(&hdev->pdev->dev,
1910                         "rx private waterline config cmd failed %d\n",
1911                         ret);
1912         return ret;
1913 }
1914
1915 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1916                                     struct hclge_pkt_buf_alloc *buf_alloc)
1917 {
1918         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1919         struct hclge_rx_com_thrd *req;
1920         struct hclge_desc desc[2];
1921         struct hclge_tc_thrd *tc;
1922         int i, j;
1923         int ret;
1924
1925         for (i = 0; i < 2; i++) {
1926                 hclge_cmd_setup_basic_desc(&desc[i],
1927                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1928                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1929
1930                 /* The first descriptor sets the NEXT bit to 1 */
1931                 if (i == 0)
1932                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1933                 else
1934                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1935
1936                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1937                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1938
1939                         req->com_thrd[j].high =
1940                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1941                         req->com_thrd[j].high |=
1942                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1943                         req->com_thrd[j].low =
1944                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1945                         req->com_thrd[j].low |=
1946                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1947                 }
1948         }
1949
1950         /* Send 2 descriptors at one time */
1951         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1952         if (ret)
1953                 dev_err(&hdev->pdev->dev,
1954                         "common threshold config cmd failed %d\n", ret);
1955         return ret;
1956 }
1957
1958 static int hclge_common_wl_config(struct hclge_dev *hdev,
1959                                   struct hclge_pkt_buf_alloc *buf_alloc)
1960 {
1961         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1962         struct hclge_rx_com_wl *req;
1963         struct hclge_desc desc;
1964         int ret;
1965
1966         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1967
1968         req = (struct hclge_rx_com_wl *)desc.data;
1969         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1970         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1971
1972         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1973         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1974
1975         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1976         if (ret)
1977                 dev_err(&hdev->pdev->dev,
1978                         "common waterline config cmd failed %d\n", ret);
1979
1980         return ret;
1981 }
1982
1983 int hclge_buffer_alloc(struct hclge_dev *hdev)
1984 {
1985         struct hclge_pkt_buf_alloc *pkt_buf;
1986         int ret;
1987
1988         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1989         if (!pkt_buf)
1990                 return -ENOMEM;
1991
1992         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1993         if (ret) {
1994                 dev_err(&hdev->pdev->dev,
1995                         "could not calc tx buffer size for all TCs %d\n", ret);
1996                 goto out;
1997         }
1998
1999         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2000         if (ret) {
2001                 dev_err(&hdev->pdev->dev,
2002                         "could not alloc tx buffers %d\n", ret);
2003                 goto out;
2004         }
2005
2006         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2007         if (ret) {
2008                 dev_err(&hdev->pdev->dev,
2009                         "could not calc rx priv buffer size for all TCs %d\n",
2010                         ret);
2011                 goto out;
2012         }
2013
2014         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2015         if (ret) {
2016                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2017                         ret);
2018                 goto out;
2019         }
2020
2021         if (hnae3_dev_dcb_supported(hdev)) {
2022                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2023                 if (ret) {
2024                         dev_err(&hdev->pdev->dev,
2025                                 "could not configure rx private waterline %d\n",
2026                                 ret);
2027                         goto out;
2028                 }
2029
2030                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2031                 if (ret) {
2032                         dev_err(&hdev->pdev->dev,
2033                                 "could not configure common threshold %d\n",
2034                                 ret);
2035                         goto out;
2036                 }
2037         }
2038
2039         ret = hclge_common_wl_config(hdev, pkt_buf);
2040         if (ret)
2041                 dev_err(&hdev->pdev->dev,
2042                         "could not configure common waterline %d\n", ret);
2043
2044 out:
2045         kfree(pkt_buf);
2046         return ret;
2047 }
2048
2049 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2050 {
2051         struct hnae3_handle *roce = &vport->roce;
2052         struct hnae3_handle *nic = &vport->nic;
2053
2054         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2055
2056         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2057             vport->back->num_msi_left == 0)
2058                 return -EINVAL;
2059
2060         roce->rinfo.base_vector = vport->back->roce_base_vector;
2061
2062         roce->rinfo.netdev = nic->kinfo.netdev;
2063         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2064
2065         roce->pdev = nic->pdev;
2066         roce->ae_algo = nic->ae_algo;
2067         roce->numa_node_mask = nic->numa_node_mask;
2068
2069         return 0;
2070 }
2071
2072 static int hclge_init_msi(struct hclge_dev *hdev)
2073 {
2074         struct pci_dev *pdev = hdev->pdev;
2075         int vectors;
2076         int i;
2077
2078         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2079                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2080         if (vectors < 0) {
2081                 dev_err(&pdev->dev,
2082                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2083                         vectors);
2084                 return vectors;
2085         }
2086         if (vectors < hdev->num_msi)
2087                 dev_warn(&hdev->pdev->dev,
2088                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2089                          hdev->num_msi, vectors);
2090
2091         hdev->num_msi = vectors;
2092         hdev->num_msi_left = vectors;
2093         hdev->base_msi_vector = pdev->irq;
2094         hdev->roce_base_vector = hdev->base_msi_vector +
2095                                 hdev->roce_base_msix_offset;
2096
2097         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2098                                            sizeof(u16), GFP_KERNEL);
2099         if (!hdev->vector_status) {
2100                 pci_free_irq_vectors(pdev);
2101                 return -ENOMEM;
2102         }
2103
2104         for (i = 0; i < hdev->num_msi; i++)
2105                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2106
2107         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2108                                         sizeof(int), GFP_KERNEL);
2109         if (!hdev->vector_irq) {
2110                 pci_free_irq_vectors(pdev);
2111                 return -ENOMEM;
2112         }
2113
2114         return 0;
2115 }
2116
2117 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2118 {
2119
2120         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2121                 duplex = HCLGE_MAC_FULL;
2122
2123         return duplex;
2124 }
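
/* Editor's note: behaviour of hclge_check_speed_dup() above, restated from
 * the code itself: a half-duplex request is honoured only for 10M and 100M;
 * for every higher speed the helper returns HCLGE_MAC_FULL regardless of
 * the duplex value passed in.
 */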
2125
2126 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2127                                       u8 duplex)
2128 {
2129         struct hclge_config_mac_speed_dup_cmd *req;
2130         struct hclge_desc desc;
2131         int ret;
2132
2133         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2134
2135         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2136
2137         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2138
2139         switch (speed) {
2140         case HCLGE_MAC_SPEED_10M:
2141                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2142                                 HCLGE_CFG_SPEED_S, 6);
2143                 break;
2144         case HCLGE_MAC_SPEED_100M:
2145                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2146                                 HCLGE_CFG_SPEED_S, 7);
2147                 break;
2148         case HCLGE_MAC_SPEED_1G:
2149                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2150                                 HCLGE_CFG_SPEED_S, 0);
2151                 break;
2152         case HCLGE_MAC_SPEED_10G:
2153                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2154                                 HCLGE_CFG_SPEED_S, 1);
2155                 break;
2156         case HCLGE_MAC_SPEED_25G:
2157                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2158                                 HCLGE_CFG_SPEED_S, 2);
2159                 break;
2160         case HCLGE_MAC_SPEED_40G:
2161                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2162                                 HCLGE_CFG_SPEED_S, 3);
2163                 break;
2164         case HCLGE_MAC_SPEED_50G:
2165                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2166                                 HCLGE_CFG_SPEED_S, 4);
2167                 break;
2168         case HCLGE_MAC_SPEED_100G:
2169                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2170                                 HCLGE_CFG_SPEED_S, 5);
2171                 break;
2172         default:
2173                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2174                 return -EINVAL;
2175         }
2176
2177         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2178                       1);
2179
2180         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2181         if (ret) {
2182                 dev_err(&hdev->pdev->dev,
2183                         "mac speed/duplex config cmd failed %d.\n", ret);
2184                 return ret;
2185         }
2186
2187         return 0;
2188 }
2189
2190 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2191 {
2192         int ret;
2193
2194         duplex = hclge_check_speed_dup(duplex, speed);
2195         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2196                 return 0;
2197
2198         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2199         if (ret)
2200                 return ret;
2201
2202         hdev->hw.mac.speed = speed;
2203         hdev->hw.mac.duplex = duplex;
2204
2205         return 0;
2206 }
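
/* Editor's note: illustrative sketch only.  hclge_cfg_mac_speed_dup() skips
 * the firmware command when the cached speed/duplex already match, while
 * hclge_cfg_mac_speed_dup_hw() always issues it (e.g. during MAC init).
 */
#if 0	/* example only */
static int hclge_set_25g_full_example(struct hclge_dev *hdev)
{
        /* no-op if the MAC is already at 25G/full, otherwise programs HW */
        return hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_25G,
                                       HCLGE_MAC_FULL);
}
#endif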
2207
2208 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2209                                      u8 duplex)
2210 {
2211         struct hclge_vport *vport = hclge_get_vport(handle);
2212         struct hclge_dev *hdev = vport->back;
2213
2214         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2215 }
2216
2217 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2218 {
2219         struct hclge_config_auto_neg_cmd *req;
2220         struct hclge_desc desc;
2221         u32 flag = 0;
2222         int ret;
2223
2224         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2225
2226         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2227         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2228         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2229
2230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2231         if (ret)
2232                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2233                         ret);
2234
2235         return ret;
2236 }
2237
2238 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2239 {
2240         struct hclge_vport *vport = hclge_get_vport(handle);
2241         struct hclge_dev *hdev = vport->back;
2242
2243         if (!hdev->hw.mac.support_autoneg) {
2244                 if (enable) {
2245                         dev_err(&hdev->pdev->dev,
2246                                 "autoneg is not supported by current port\n");
2247                         return -EOPNOTSUPP;
2248                 } else {
2249                         return 0;
2250                 }
2251         }
2252
2253         return hclge_set_autoneg_en(hdev, enable);
2254 }
2255
2256 static int hclge_get_autoneg(struct hnae3_handle *handle)
2257 {
2258         struct hclge_vport *vport = hclge_get_vport(handle);
2259         struct hclge_dev *hdev = vport->back;
2260         struct phy_device *phydev = hdev->hw.mac.phydev;
2261
2262         if (phydev)
2263                 return phydev->autoneg;
2264
2265         return hdev->hw.mac.autoneg;
2266 }
2267
2268 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2269 {
2270         struct hclge_vport *vport = hclge_get_vport(handle);
2271         struct hclge_dev *hdev = vport->back;
2272         int ret;
2273
2274         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2275
2276         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2277         if (ret)
2278                 return ret;
2279         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2280 }
2281
2282 static int hclge_mac_init(struct hclge_dev *hdev)
2283 {
2284         struct hclge_mac *mac = &hdev->hw.mac;
2285         int ret;
2286
2287         hdev->support_sfp_query = true;
2288         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2289         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2290                                          hdev->hw.mac.duplex);
2291         if (ret) {
2292                 dev_err(&hdev->pdev->dev,
2293                         "Config mac speed dup fail ret=%d\n", ret);
2294                 return ret;
2295         }
2296
2297         mac->link = 0;
2298
2299         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2300         if (ret) {
2301                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2302                 return ret;
2303         }
2304
2305         ret = hclge_buffer_alloc(hdev);
2306         if (ret)
2307                 dev_err(&hdev->pdev->dev,
2308                         "allocate buffer fail, ret=%d\n", ret);
2309
2310         return ret;
2311 }
2312
2313 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2314 {
2315         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2316             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2317                 schedule_work(&hdev->mbx_service_task);
2318 }
2319
2320 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2321 {
2322         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2323                 schedule_work(&hdev->rst_service_task);
2324 }
2325
2326 static void hclge_task_schedule(struct hclge_dev *hdev)
2327 {
2328         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2329             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2330             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2331                 (void)schedule_work(&hdev->service_task);
2332 }
2333
2334 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2335 {
2336         struct hclge_link_status_cmd *req;
2337         struct hclge_desc desc;
2338         int link_status;
2339         int ret;
2340
2341         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2342         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343         if (ret) {
2344                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2345                         ret);
2346                 return ret;
2347         }
2348
2349         req = (struct hclge_link_status_cmd *)desc.data;
2350         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2351
2352         return !!link_status;
2353 }
2354
2355 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2356 {
2357         int mac_state;
2358         int link_stat;
2359
2360         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2361                 return 0;
2362
2363         mac_state = hclge_get_mac_link_status(hdev);
2364
2365         if (hdev->hw.mac.phydev) {
2366                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2367                         link_stat = mac_state &
2368                                 hdev->hw.mac.phydev->link;
2369                 else
2370                         link_stat = 0;
2371
2372         } else {
2373                 link_stat = mac_state;
2374         }
2375
2376         return !!link_stat;
2377 }
2378
2379 static void hclge_update_link_status(struct hclge_dev *hdev)
2380 {
2381         struct hnae3_client *rclient = hdev->roce_client;
2382         struct hnae3_client *client = hdev->nic_client;
2383         struct hnae3_handle *rhandle;
2384         struct hnae3_handle *handle;
2385         int state;
2386         int i;
2387
2388         if (!client)
2389                 return;
2390         state = hclge_get_mac_phy_link(hdev);
2391         if (state != hdev->hw.mac.link) {
2392                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2393                         handle = &hdev->vport[i].nic;
2394                         client->ops->link_status_change(handle, state);
2395                         hclge_config_mac_tnl_int(hdev, state);
2396                         rhandle = &hdev->vport[i].roce;
2397                         if (rclient && rclient->ops->link_status_change)
2398                                 rclient->ops->link_status_change(rhandle,
2399                                                                  state);
2400                 }
2401                 hdev->hw.mac.link = state;
2402         }
2403 }
2404
2405 static void hclge_update_port_capability(struct hclge_mac *mac)
2406 {
2407         /* firmware cannot identify backplane type, the media type
2408          * read from the configuration can help to handle it
2409          */
2410         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2411             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2412                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2413         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2414                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2415
2416         if (mac->support_autoneg) {
2417                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2418                 linkmode_copy(mac->advertising, mac->supported);
2419         } else {
2420                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2421                                    mac->supported);
2422                 linkmode_zero(mac->advertising);
2423         }
2424 }
2425
2426 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2427 {
2428         struct hclge_sfp_info_cmd *resp = NULL;
2429         struct hclge_desc desc;
2430         int ret;
2431
2432         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2433         resp = (struct hclge_sfp_info_cmd *)desc.data;
2434         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2435         if (ret == -EOPNOTSUPP) {
2436                 dev_warn(&hdev->pdev->dev,
2437                          "IMP does not support getting SFP speed %d\n", ret);
2438                 return ret;
2439         } else if (ret) {
2440                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2441                 return ret;
2442         }
2443
2444         *speed = le32_to_cpu(resp->speed);
2445
2446         return 0;
2447 }
2448
2449 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2450 {
2451         struct hclge_sfp_info_cmd *resp;
2452         struct hclge_desc desc;
2453         int ret;
2454
2455         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2456         resp = (struct hclge_sfp_info_cmd *)desc.data;
2457
2458         resp->query_type = QUERY_ACTIVE_SPEED;
2459
2460         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2461         if (ret == -EOPNOTSUPP) {
2462                 dev_warn(&hdev->pdev->dev,
2463                          "IMP does not support getting SFP info %d\n", ret);
2464                 return ret;
2465         } else if (ret) {
2466                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2467                 return ret;
2468         }
2469
2470         mac->speed = le32_to_cpu(resp->speed);
2471         /* if resp->speed_ability is 0, the firmware is an old version
2472          * and these params should not be updated
2473          */
2474         if (resp->speed_ability) {
2475                 mac->module_type = le32_to_cpu(resp->module_type);
2476                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2477                 mac->autoneg = resp->autoneg;
2478                 mac->support_autoneg = resp->autoneg_ability;
2479         } else {
2480                 mac->speed_type = QUERY_SFP_SPEED;
2481         }
2482
2483         return 0;
2484 }
2485
2486 static int hclge_update_port_info(struct hclge_dev *hdev)
2487 {
2488         struct hclge_mac *mac = &hdev->hw.mac;
2489         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2490         int ret;
2491
2492         /* get the port info from SFP cmd if not copper port */
2493         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2494                 return 0;
2495
2496         /* if IMP does not support getting SFP/qSFP info, return directly */
2497         if (!hdev->support_sfp_query)
2498                 return 0;
2499
2500         if (hdev->pdev->revision >= 0x21)
2501                 ret = hclge_get_sfp_info(hdev, mac);
2502         else
2503                 ret = hclge_get_sfp_speed(hdev, &speed);
2504
2505         if (ret == -EOPNOTSUPP) {
2506                 hdev->support_sfp_query = false;
2507                 return ret;
2508         } else if (ret) {
2509                 return ret;
2510         }
2511
2512         if (hdev->pdev->revision >= 0x21) {
2513                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2514                         hclge_update_port_capability(mac);
2515                         return 0;
2516                 }
2517                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2518                                                HCLGE_MAC_FULL);
2519         } else {
2520                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2521                         return 0; /* do nothing if no SFP */
2522
2523                 /* must config full duplex for SFP */
2524                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2525         }
2526 }
2527
2528 static int hclge_get_status(struct hnae3_handle *handle)
2529 {
2530         struct hclge_vport *vport = hclge_get_vport(handle);
2531         struct hclge_dev *hdev = vport->back;
2532
2533         hclge_update_link_status(hdev);
2534
2535         return hdev->hw.mac.link;
2536 }
2537
2538 static void hclge_service_timer(struct timer_list *t)
2539 {
2540         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2541
2542         mod_timer(&hdev->service_timer, jiffies + HZ);
2543         hdev->hw_stats.stats_timer++;
2544         hclge_task_schedule(hdev);
2545 }
2546
2547 static void hclge_service_complete(struct hclge_dev *hdev)
2548 {
2549         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2550
2551         /* Flush memory before next watchdog */
2552         smp_mb__before_atomic();
2553         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2554 }
2555
2556 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2557 {
2558         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2559
2560         /* fetch the events from their corresponding regs */
2561         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2562         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2563         msix_src_reg = hclge_read_dev(&hdev->hw,
2564                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2565
2566         /* Assumption: if by any chance reset and mailbox events are reported
2567          * together, we will only process the reset event in this pass and
2568          * defer the processing of the mailbox events. Since we would not have
2569          * cleared the RX CMDQ event this time, we would receive another
2570          * interrupt from H/W just for the mailbox.
2571          */
2572
2573         /* check for vector0 reset event sources */
2574         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2575                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2576                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2577                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2578                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2579                 hdev->rst_stats.imp_rst_cnt++;
2580                 return HCLGE_VECTOR0_EVENT_RST;
2581         }
2582
2583         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2584                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2585                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2586                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2587                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2588                 hdev->rst_stats.global_rst_cnt++;
2589                 return HCLGE_VECTOR0_EVENT_RST;
2590         }
2591
2592         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2593                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2594                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2595                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2596                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2597                 hdev->rst_stats.core_rst_cnt++;
2598                 return HCLGE_VECTOR0_EVENT_RST;
2599         }
2600
2601         /* check for vector0 msix event source */
2602         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2603                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2604                         msix_src_reg);
2605                 return HCLGE_VECTOR0_EVENT_ERR;
2606         }
2607
2608         /* check for vector0 mailbox(=CMDQ RX) event source */
2609         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2610                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2611                 *clearval = cmdq_src_reg;
2612                 return HCLGE_VECTOR0_EVENT_MBX;
2613         }
2614
2615         /* print other vector0 event source */
2616         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2617                 cmdq_src_reg, msix_src_reg);
2618         return HCLGE_VECTOR0_EVENT_OTHER;
2619 }
2620
2621 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2622                                     u32 regclr)
2623 {
2624         switch (event_type) {
2625         case HCLGE_VECTOR0_EVENT_RST:
2626                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2627                 break;
2628         case HCLGE_VECTOR0_EVENT_MBX:
2629                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2630                 break;
2631         default:
2632                 break;
2633         }
2634 }
2635
2636 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2637 {
2638         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2639                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2640                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2641                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2642         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2643 }
2644
2645 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2646 {
2647         writel(enable ? 1 : 0, vector->addr);
2648 }
2649
2650 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2651 {
2652         struct hclge_dev *hdev = data;
2653         u32 event_cause;
2654         u32 clearval;
2655
2656         hclge_enable_vector(&hdev->misc_vector, false);
2657         event_cause = hclge_check_event_cause(hdev, &clearval);
2658
2659         /* vector 0 interrupt is shared with reset and mailbox source events. */
2660         switch (event_cause) {
2661         case HCLGE_VECTOR0_EVENT_ERR:
2662                 /* we do not know what type of reset is required now. This could
2663                  * only be decided after we fetch the type of errors which
2664                  * caused this event. Therefore, we will do below for now:
2665                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2666                  *    have deferred the type of reset to be used.
2667                  * 2. Schedule the reset service task.
2668                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2669                  *    will fetch the correct type of reset. This would be done
2670                  *    by first decoding the types of errors.
2671                  */
2672                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2673                 /* fall through */
2674         case HCLGE_VECTOR0_EVENT_RST:
2675                 hclge_reset_task_schedule(hdev);
2676                 break;
2677         case HCLGE_VECTOR0_EVENT_MBX:
2678                 /* If we are here then,
2679                  * 1. Either we are not handling any mbx task and we are not
2680                  *    scheduled as well
2681                  *                        OR
2682                  * 2. We could be handling an mbx task but nothing more is
2683                  *    scheduled.
2684                  * In both cases, we should schedule mbx task as there are more
2685                  * mbx messages reported by this interrupt.
2686                  */
2687                 hclge_mbx_task_schedule(hdev);
2688                 break;
2689         default:
2690                 dev_warn(&hdev->pdev->dev,
2691                          "received unknown or unhandled event of vector0\n");
2692                 break;
2693         }
2694
2695         /* clear the source of interrupt if it is not caused by reset */
2696         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2697                 hclge_clear_event_cause(hdev, event_cause, clearval);
2698                 hclge_enable_vector(&hdev->misc_vector, true);
2699         }
2700
2701         return IRQ_HANDLED;
2702 }
2703
2704 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2705 {
2706         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2707                 dev_warn(&hdev->pdev->dev,
2708                          "vector(vector_id %d) has been freed.\n", vector_id);
2709                 return;
2710         }
2711
2712         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2713         hdev->num_msi_left += 1;
2714         hdev->num_msi_used -= 1;
2715 }
2716
2717 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2718 {
2719         struct hclge_misc_vector *vector = &hdev->misc_vector;
2720
2721         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2722
2723         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2724         hdev->vector_status[0] = 0;
2725
2726         hdev->num_msi_left -= 1;
2727         hdev->num_msi_used += 1;
2728 }
2729
2730 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2731 {
2732         int ret;
2733
2734         hclge_get_misc_vector(hdev);
2735
2736         /* this will be freed explicitly in hclge_misc_irq_uninit() */
2737         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2738                           0, "hclge_misc", hdev);
2739         if (ret) {
2740                 hclge_free_vector(hdev, 0);
2741                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2742                         hdev->misc_vector.vector_irq);
2743         }
2744
2745         return ret;
2746 }
2747
2748 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2749 {
2750         free_irq(hdev->misc_vector.vector_irq, hdev);
2751         hclge_free_vector(hdev, 0);
2752 }
2753
2754 int hclge_notify_client(struct hclge_dev *hdev,
2755                         enum hnae3_reset_notify_type type)
2756 {
2757         struct hnae3_client *client = hdev->nic_client;
2758         u16 i;
2759
2760         if (!client->ops->reset_notify)
2761                 return -EOPNOTSUPP;
2762
2763         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2764                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2765                 int ret;
2766
2767                 ret = client->ops->reset_notify(handle, type);
2768                 if (ret) {
2769                         dev_err(&hdev->pdev->dev,
2770                                 "notify nic client failed %d(%d)\n", type, ret);
2771                         return ret;
2772                 }
2773         }
2774
2775         return 0;
2776 }
2777
2778 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2779                                     enum hnae3_reset_notify_type type)
2780 {
2781         struct hnae3_client *client = hdev->roce_client;
2782         int ret = 0;
2783         u16 i;
2784
2785         if (!client)
2786                 return 0;
2787
2788         if (!client->ops->reset_notify)
2789                 return -EOPNOTSUPP;
2790
2791         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2792                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2793
2794                 ret = client->ops->reset_notify(handle, type);
2795                 if (ret) {
2796                         dev_err(&hdev->pdev->dev,
2797                                 "notify roce client failed %d(%d)\n",
2798                                 type, ret);
2799                         return ret;
2800                 }
2801         }
2802
2803         return ret;
2804 }
2805
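/* Poll the hardware until the reset pending bit for the current reset type
 * clears, checking every 100 ms for up to 200 iterations (about 20 seconds).
 * FLR is the exception: it waits on the HNAE3_FLR_DONE flag set by the FLR
 * flow instead of a register bit.
 */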
2806 static int hclge_reset_wait(struct hclge_dev *hdev)
2807 {
2808 #define HCLGE_RESET_WATI_MS     100
2809 #define HCLGE_RESET_WAIT_CNT    200
2810         u32 val, reg, reg_bit;
2811         u32 cnt = 0;
2812
2813         switch (hdev->reset_type) {
2814         case HNAE3_IMP_RESET:
2815                 reg = HCLGE_GLOBAL_RESET_REG;
2816                 reg_bit = HCLGE_IMP_RESET_BIT;
2817                 break;
2818         case HNAE3_GLOBAL_RESET:
2819                 reg = HCLGE_GLOBAL_RESET_REG;
2820                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2821                 break;
2822         case HNAE3_CORE_RESET:
2823                 reg = HCLGE_GLOBAL_RESET_REG;
2824                 reg_bit = HCLGE_CORE_RESET_BIT;
2825                 break;
2826         case HNAE3_FUNC_RESET:
2827                 reg = HCLGE_FUN_RST_ING;
2828                 reg_bit = HCLGE_FUN_RST_ING_B;
2829                 break;
2830         case HNAE3_FLR_RESET:
2831                 break;
2832         default:
2833                 dev_err(&hdev->pdev->dev,
2834                         "Wait for unsupported reset type: %d\n",
2835                         hdev->reset_type);
2836                 return -EINVAL;
2837         }
2838
2839         if (hdev->reset_type == HNAE3_FLR_RESET) {
2840                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2841                        cnt++ < HCLGE_RESET_WAIT_CNT)
2842                         msleep(HCLGE_RESET_WATI_MS);
2843
2844                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2845                         dev_err(&hdev->pdev->dev,
2846                                 "flr wait timeout: %d\n", cnt);
2847                         return -EBUSY;
2848                 }
2849
2850                 return 0;
2851         }
2852
2853         val = hclge_read_dev(&hdev->hw, reg);
2854         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2855                 msleep(HCLGE_RESET_WATI_MS);
2856                 val = hclge_read_dev(&hdev->hw, reg);
2857                 cnt++;
2858         }
2859
2860         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2861                 dev_warn(&hdev->pdev->dev,
2862                          "Wait for reset timeout: %d\n", hdev->reset_type);
2863                 return -EBUSY;
2864         }
2865
2866         return 0;
2867 }
2868
2869 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2870 {
2871         struct hclge_vf_rst_cmd *req;
2872         struct hclge_desc desc;
2873
2874         req = (struct hclge_vf_rst_cmd *)desc.data;
2875         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2876         req->dest_vfid = func_id;
2877
2878         if (reset)
2879                 req->vf_rst = 0x1;
2880
2881         return hclge_cmd_send(&hdev->hw, &desc, 1);
2882 }
2883
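/* Assert or de-assert the function reset state for every VF vport and, when
 * asserting, inform each alive VF so its driver can stop IO before the PF
 * reset proceeds.
 */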
2884 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2885 {
2886         int i;
2887
2888         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2889                 struct hclge_vport *vport = &hdev->vport[i];
2890                 int ret;
2891
2892                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2893                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2894                 if (ret) {
2895                         dev_err(&hdev->pdev->dev,
2896                                 "set vf(%d) rst failed %d!\n",
2897                                 vport->vport_id, ret);
2898                         return ret;
2899                 }
2900
2901                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2902                         continue;
2903
2904                 /* Inform VF to process the reset.
2905                  * hclge_inform_reset_assert_to_vf may fail if VF
2906                  * driver is not loaded.
2907                  */
2908                 ret = hclge_inform_reset_assert_to_vf(vport);
2909                 if (ret)
2910                         dev_warn(&hdev->pdev->dev,
2911                                  "inform reset to vf(%d) failed %d!\n",
2912                                  vport->vport_id, ret);
2913         }
2914
2915         return 0;
2916 }
2917
2918 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2919 {
2920         struct hclge_desc desc;
2921         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2922         int ret;
2923
2924         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2925         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2926         req->fun_reset_vfid = func_id;
2927
2928         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2929         if (ret)
2930                 dev_err(&hdev->pdev->dev,
2931                         "send function reset cmd fail, status =%d\n", ret);
2932
2933         return ret;
2934 }
2935
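/* Kick off the requested reset. Global and core resets are triggered by
 * writing the global reset register directly; PF reset and FLR are queued on
 * reset_pending and left to the reset task. If hardware is still busy with a
 * previous reset, just log the reset registers and return.
 */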
2936 static void hclge_do_reset(struct hclge_dev *hdev)
2937 {
2938         struct hnae3_handle *handle = &hdev->vport[0].nic;
2939         struct pci_dev *pdev = hdev->pdev;
2940         u32 val;
2941
2942         if (hclge_get_hw_reset_stat(handle)) {
2943                 dev_info(&pdev->dev, "Hardware reset not finished\n");
2944                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2945                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2946                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2947                 return;
2948         }
2949
2950         switch (hdev->reset_type) {
2951         case HNAE3_GLOBAL_RESET:
2952                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2953                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2954                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2955                 dev_info(&pdev->dev, "Global Reset requested\n");
2956                 break;
2957         case HNAE3_CORE_RESET:
2958                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2959                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2960                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2961                 dev_info(&pdev->dev, "Core Reset requested\n");
2962                 break;
2963         case HNAE3_FUNC_RESET:
2964                 dev_info(&pdev->dev, "PF Reset requested\n");
2965                 /* schedule again to check later */
2966                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2967                 hclge_reset_task_schedule(hdev);
2968                 break;
2969         case HNAE3_FLR_RESET:
2970                 dev_info(&pdev->dev, "FLR requested\n");
2971                 /* schedule again to check later */
2972                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2973                 hclge_reset_task_schedule(hdev);
2974                 break;
2975         default:
2976                 dev_warn(&pdev->dev,
2977                          "Unsupported reset type: %d\n", hdev->reset_type);
2978                 break;
2979         }
2980 }
2981
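/* Convert the pending reset bits in @addr into the single highest priority
 * reset level (IMP > global > core > func > FLR), clearing any lower level
 * bits it supersedes. A request lower than the reset already in progress is
 * reported as HNAE3_NONE_RESET.
 */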
2982 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2983                                                    unsigned long *addr)
2984 {
2985         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2986
2987         /* first, resolve any unknown reset type to the known type(s) */
2988         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2989                 /* we will intentionally ignore any errors from this function
2990                  * as we will end up in *some* reset request in any case
2991                  */
2992                 hclge_handle_hw_msix_error(hdev, addr);
2993                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2994                 /* We deferred the clearing of the error event which caused
2995                  * the interrupt since it was not possible to do that in
2996                  * interrupt context (and this is the reason we introduced
2997                  * the new UNKNOWN reset type). Now that the errors have been
2998                  * handled and cleared in hardware, we can safely enable
2999                  * interrupts. This is an exception to the norm.
3000                  */
3001                 hclge_enable_vector(&hdev->misc_vector, true);
3002         }
3003
3004         /* return the highest priority reset level amongst all */
3005         if (test_bit(HNAE3_IMP_RESET, addr)) {
3006                 rst_level = HNAE3_IMP_RESET;
3007                 clear_bit(HNAE3_IMP_RESET, addr);
3008                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3009                 clear_bit(HNAE3_CORE_RESET, addr);
3010                 clear_bit(HNAE3_FUNC_RESET, addr);
3011         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3012                 rst_level = HNAE3_GLOBAL_RESET;
3013                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3014                 clear_bit(HNAE3_CORE_RESET, addr);
3015                 clear_bit(HNAE3_FUNC_RESET, addr);
3016         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3017                 rst_level = HNAE3_CORE_RESET;
3018                 clear_bit(HNAE3_CORE_RESET, addr);
3019                 clear_bit(HNAE3_FUNC_RESET, addr);
3020         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3021                 rst_level = HNAE3_FUNC_RESET;
3022                 clear_bit(HNAE3_FUNC_RESET, addr);
3023         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3024                 rst_level = HNAE3_FLR_RESET;
3025                 clear_bit(HNAE3_FLR_RESET, addr);
3026         }
3027
3028         if (hdev->reset_type != HNAE3_NONE_RESET &&
3029             rst_level < hdev->reset_type)
3030                 return HNAE3_NONE_RESET;
3031
3032         return rst_level;
3033 }
3034
3035 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3036 {
3037         u32 clearval = 0;
3038
3039         switch (hdev->reset_type) {
3040         case HNAE3_IMP_RESET:
3041                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3042                 break;
3043         case HNAE3_GLOBAL_RESET:
3044                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3045                 break;
3046         case HNAE3_CORE_RESET:
3047                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3048                 break;
3049         default:
3050                 break;
3051         }
3052
3053         if (!clearval)
3054                 return;
3055
3056         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3057         hclge_enable_vector(&hdev->misc_vector, true);
3058 }
3059
3060 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3061 {
3062         int ret = 0;
3063
3064         switch (hdev->reset_type) {
3065         case HNAE3_FUNC_RESET:
3066                 /* fall through */
3067         case HNAE3_FLR_RESET:
3068                 ret = hclge_set_all_vf_rst(hdev, true);
3069                 break;
3070         default:
3071                 break;
3072         }
3073
3074         return ret;
3075 }
3076
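/* Assert the reset right before waiting for hardware: send the function
 * reset command (PF reset), mark the FLR down state (FLR), or set the IMP
 * reset request bit. The command queue is disabled for PF reset and FLR,
 * since firmware commands are only valid again after hclge_cmd_init.
 */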
3077 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3078 {
3079         u32 reg_val;
3080         int ret = 0;
3081
3082         switch (hdev->reset_type) {
3083         case HNAE3_FUNC_RESET:
3084                 /* There is no mechanism for the PF to know if the VF has
3085                  * stopped IO yet, so just wait 100 ms for the VF to stop IO
3086                  */
3087                 msleep(100);
3088                 ret = hclge_func_reset_cmd(hdev, 0);
3089                 if (ret) {
3090                         dev_err(&hdev->pdev->dev,
3091                                 "asserting function reset fail %d!\n", ret);
3092                         return ret;
3093                 }
3094
3095                 /* After performing PF reset, it is not necessary to do the
3096                  * mailbox handling or send any command to firmware, because
3097                  * any mailbox handling or command to firmware is only valid
3098                  * after hclge_cmd_init is called.
3099                  */
3100                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3101                 hdev->rst_stats.pf_rst_cnt++;
3102                 break;
3103         case HNAE3_FLR_RESET:
3104                 /* There is no mechanism for the PF to know if the VF has
3105                  * stopped IO yet, so just wait 100 ms for the VF to stop IO
3106                  */
3107                 msleep(100);
3108                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3109                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3110                 hdev->rst_stats.flr_rst_cnt++;
3111                 break;
3112         case HNAE3_IMP_RESET:
3113                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3114                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3115                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3116                 break;
3117         default:
3118                 break;
3119         }
3120
3121         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3122
3123         return ret;
3124 }
3125
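/* Decide how to recover from a failed reset attempt: keep waiting if a
 * hardware reset is still pending, back off if an IMP reset is already
 * asserted, or retry up to MAX_RESET_FAIL_CNT times, re-arming the reset
 * timer (which escalates to a global reset) after RESET_UPGRADE_DELAY_SEC.
 * Returns true when the reset task should be rescheduled.
 */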
3126 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3127 {
3128 #define MAX_RESET_FAIL_CNT 5
3129 #define RESET_UPGRADE_DELAY_SEC 10
3130
3131         if (hdev->reset_pending) {
3132                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3133                          hdev->reset_pending);
3134                 return true;
3135         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3136                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3137                     BIT(HCLGE_IMP_RESET_BIT))) {
3138                 dev_info(&hdev->pdev->dev,
3139                          "reset failed because IMP Reset is pending\n");
3140                 hclge_clear_reset_cause(hdev);
3141                 return false;
3142         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3143                 hdev->reset_fail_cnt++;
3144                 if (is_timeout) {
3145                         set_bit(hdev->reset_type, &hdev->reset_pending);
3146                         dev_info(&hdev->pdev->dev,
3147                                  "re-schedule to wait for hw reset done\n");
3148                         return true;
3149                 }
3150
3151                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3152                 hclge_clear_reset_cause(hdev);
3153                 mod_timer(&hdev->reset_timer,
3154                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3155
3156                 return false;
3157         }
3158
3159         hclge_clear_reset_cause(hdev);
3160         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3161         return false;
3162 }
3163
3164 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3165 {
3166         int ret = 0;
3167
3168         switch (hdev->reset_type) {
3169         case HNAE3_FUNC_RESET:
3170                 /* fall through */
3171         case HNAE3_FLR_RESET:
3172                 ret = hclge_set_all_vf_rst(hdev, false);
3173                 break;
3174         default:
3175                 break;
3176         }
3177
3178         return ret;
3179 }
3180
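/* The main reset sequence: notify the roce/nic clients down, prepare and
 * assert the reset, wait for hardware to finish, re-initialize the ae
 * device and then bring the clients back up. On any failure the error is
 * handed to hclge_reset_err_handle(), which may reschedule the reset task.
 */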
3181 static void hclge_reset(struct hclge_dev *hdev)
3182 {
3183         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3184         bool is_timeout = false;
3185         int ret;
3186
3187         /* Initialize ae_dev reset status as well, in case enet layer wants to
3188          * know if device is undergoing reset
3189          */
3190         ae_dev->reset_type = hdev->reset_type;
3191         hdev->rst_stats.reset_cnt++;
3192         /* perform reset of the stack & ae device for a client */
3193         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3194         if (ret)
3195                 goto err_reset;
3196
3197         ret = hclge_reset_prepare_down(hdev);
3198         if (ret)
3199                 goto err_reset;
3200
3201         rtnl_lock();
3202         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3203         if (ret)
3204                 goto err_reset_lock;
3205
3206         rtnl_unlock();
3207
3208         ret = hclge_reset_prepare_wait(hdev);
3209         if (ret)
3210                 goto err_reset;
3211
3212         if (hclge_reset_wait(hdev)) {
3213                 is_timeout = true;
3214                 goto err_reset;
3215         }
3216
3217         hdev->rst_stats.hw_reset_done_cnt++;
3218
3219         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3220         if (ret)
3221                 goto err_reset;
3222
3223         rtnl_lock();
3224         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3225         if (ret)
3226                 goto err_reset_lock;
3227
3228         ret = hclge_reset_ae_dev(hdev->ae_dev);
3229         if (ret)
3230                 goto err_reset_lock;
3231
3232         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3233         if (ret)
3234                 goto err_reset_lock;
3235
3236         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3237         if (ret)
3238                 goto err_reset_lock;
3239
3240         hclge_clear_reset_cause(hdev);
3241
3242         ret = hclge_reset_prepare_up(hdev);
3243         if (ret)
3244                 goto err_reset_lock;
3245
3246         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3247         if (ret)
3248                 goto err_reset_lock;
3249
3250         rtnl_unlock();
3251
3252         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3253         if (ret)
3254                 goto err_reset;
3255
3256         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3257         if (ret)
3258                 goto err_reset;
3259
3260         hdev->last_reset_time = jiffies;
3261         hdev->reset_fail_cnt = 0;
3262         hdev->rst_stats.reset_done_cnt++;
3263         ae_dev->reset_type = HNAE3_NONE_RESET;
3264         del_timer(&hdev->reset_timer);
3265
3266         return;
3267
3268 err_reset_lock:
3269         rtnl_unlock();
3270 err_reset:
3271         if (hclge_reset_err_handle(hdev, is_timeout))
3272                 hclge_reset_task_schedule(hdev);
3273 }
3274
3275 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3276 {
3277         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3278         struct hclge_dev *hdev = ae_dev->priv;
3279
3280         /* We might end up getting called broadly because of the 2 cases below:
3281          * 1. Recoverable error was conveyed through APEI and only way to bring
3282          *    normalcy is to reset.
3283          * 2. A new reset request from the stack due to timeout
3284          *
3285          * For the first case, the error event might not have an ae handle
3286          * available. Check if this is a new reset request and we are not here
3287          * just because the last reset attempt did not succeed and the watchdog
3288          * hit us again. We will know this if the last reset request did not
3289          * occur very recently (watchdog timer = 5*HZ, so check after a
3290          * sufficiently large time, say 4*5*HZ). In case of a new request we
3291          * reset the "reset level" to PF reset. And if it is a repeat of the
3292          * most recent request, we want to throttle it; therefore, we will not
3293          * allow it again before 3*HZ has passed.
3294          */
3295         if (!handle)
3296                 handle = &hdev->vport[0].nic;
3297
3298         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3299                 return;
3300         else if (hdev->default_reset_request)
3301                 hdev->reset_level =
3302                         hclge_get_reset_level(hdev,
3303                                               &hdev->default_reset_request);
3304         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3305                 hdev->reset_level = HNAE3_FUNC_RESET;
3306
3307         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3308                  hdev->reset_level);
3309
3310         /* request reset & schedule reset task */
3311         set_bit(hdev->reset_level, &hdev->reset_request);
3312         hclge_reset_task_schedule(hdev);
3313
3314         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3315                 hdev->reset_level++;
3316 }
3317
3318 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3319                                         enum hnae3_reset_type rst_type)
3320 {
3321         struct hclge_dev *hdev = ae_dev->priv;
3322
3323         set_bit(rst_type, &hdev->default_reset_request);
3324 }
3325
3326 static void hclge_reset_timer(struct timer_list *t)
3327 {
3328         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3329
3330         dev_info(&hdev->pdev->dev,
3331                  "triggering global reset in reset timer\n");
3332         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3333         hclge_reset_event(hdev->pdev, NULL);
3334 }
3335
3336 static void hclge_reset_subtask(struct hclge_dev *hdev)
3337 {
3338         /* check if there is any ongoing reset in the hardware. This status can
3339          * be checked from reset_pending. If there is, we need to wait for the
3340          * hardware to complete the reset.
3341          *    a. If we are able to figure out in reasonable time that the
3342          *       hardware has fully reset, we can proceed with the driver and
3343          *       client reset.
3344          *    b. Else, we can come back later to check this status, so
3345          *       re-schedule now.
3346          */
3347         hdev->last_reset_time = jiffies;
3348         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3349         if (hdev->reset_type != HNAE3_NONE_RESET)
3350                 hclge_reset(hdev);
3351
3352         /* check if we got any *new* reset requests to be honored */
3353         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3354         if (hdev->reset_type != HNAE3_NONE_RESET)
3355                 hclge_do_reset(hdev);
3356
3357         hdev->reset_type = HNAE3_NONE_RESET;
3358 }
3359
3360 static void hclge_reset_service_task(struct work_struct *work)
3361 {
3362         struct hclge_dev *hdev =
3363                 container_of(work, struct hclge_dev, rst_service_task);
3364
3365         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3366                 return;
3367
3368         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3369
3370         hclge_reset_subtask(hdev);
3371
3372         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3373 }
3374
3375 static void hclge_mailbox_service_task(struct work_struct *work)
3376 {
3377         struct hclge_dev *hdev =
3378                 container_of(work, struct hclge_dev, mbx_service_task);
3379
3380         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3381                 return;
3382
3383         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3384
3385         hclge_mbx_handler(hdev);
3386
3387         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3388 }
3389
3390 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3391 {
3392         int i;
3393
3394         /* start from vport 1, since vport 0 (the PF) is always alive */
3395         for (i = 1; i < hdev->num_alloc_vport; i++) {
3396                 struct hclge_vport *vport = &hdev->vport[i];
3397
3398                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3399                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3400
3401                 /* If vf is not alive, set to default value */
3402                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3403                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3404         }
3405 }
3406
3407 static void hclge_service_task(struct work_struct *work)
3408 {
3409         struct hclge_dev *hdev =
3410                 container_of(work, struct hclge_dev, service_task);
3411
3412         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3413                 hclge_update_stats_for_all(hdev);
3414                 hdev->hw_stats.stats_timer = 0;
3415         }
3416
3417         hclge_update_port_info(hdev);
3418         hclge_update_link_status(hdev);
3419         hclge_update_vport_alive(hdev);
3420         hclge_service_complete(hdev);
3421 }
3422
3423 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3424 {
3425         /* VF handle has no client */
3426         if (!handle->client)
3427                 return container_of(handle, struct hclge_vport, nic);
3428         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3429                 return container_of(handle, struct hclge_vport, roce);
3430         else
3431                 return container_of(handle, struct hclge_vport, nic);
3432 }
3433
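/* Hand out up to @vector_num unused MSI vectors to the requesting vport
 * (vector 0 is reserved for the misc vector) and return how many vectors
 * were actually allocated.
 */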
3434 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3435                             struct hnae3_vector_info *vector_info)
3436 {
3437         struct hclge_vport *vport = hclge_get_vport(handle);
3438         struct hnae3_vector_info *vector = vector_info;
3439         struct hclge_dev *hdev = vport->back;
3440         int alloc = 0;
3441         int i, j;
3442
3443         vector_num = min(hdev->num_msi_left, vector_num);
3444
3445         for (j = 0; j < vector_num; j++) {
3446                 for (i = 1; i < hdev->num_msi; i++) {
3447                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3448                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3449                                 vector->io_addr = hdev->hw.io_base +
3450                                         HCLGE_VECTOR_REG_BASE +
3451                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3452                                         vport->vport_id *
3453                                         HCLGE_VECTOR_VF_OFFSET;
3454                                 hdev->vector_status[i] = vport->vport_id;
3455                                 hdev->vector_irq[i] = vector->vector;
3456
3457                                 vector++;
3458                                 alloc++;
3459
3460                                 break;
3461                         }
3462                 }
3463         }
3464         hdev->num_msi_left -= alloc;
3465         hdev->num_msi_used += alloc;
3466
3467         return alloc;
3468 }
3469
3470 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3471 {
3472         int i;
3473
3474         for (i = 0; i < hdev->num_msi; i++)
3475                 if (vector == hdev->vector_irq[i])
3476                         return i;
3477
3478         return -EINVAL;
3479 }
3480
3481 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3482 {
3483         struct hclge_vport *vport = hclge_get_vport(handle);
3484         struct hclge_dev *hdev = vport->back;
3485         int vector_id;
3486
3487         vector_id = hclge_get_vector_index(hdev, vector);
3488         if (vector_id < 0) {
3489                 dev_err(&hdev->pdev->dev,
3490                         "Get vector index fail. vector_id =%d\n", vector_id);
3491                 return vector_id;
3492         }
3493
3494         hclge_free_vector(hdev, vector_id);
3495
3496         return 0;
3497 }
3498
3499 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3500 {
3501         return HCLGE_RSS_KEY_SIZE;
3502 }
3503
3504 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3505 {
3506         return HCLGE_RSS_IND_TBL_SIZE;
3507 }
3508
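/* Program the RSS hash algorithm and hash key. The key is written in
 * HCLGE_RSS_HASH_KEY_NUM byte chunks, one command descriptor per chunk,
 * with the chunk index carried in the hash_config field.
 */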
3509 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3510                                   const u8 hfunc, const u8 *key)
3511 {
3512         struct hclge_rss_config_cmd *req;
3513         struct hclge_desc desc;
3514         int key_offset;
3515         int key_size;
3516         int ret;
3517
3518         req = (struct hclge_rss_config_cmd *)desc.data;
3519
3520         for (key_offset = 0; key_offset < 3; key_offset++) {
3521                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3522                                            false);
3523
3524                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3525                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3526
3527                 if (key_offset == 2)
3528                         key_size = HCLGE_RSS_KEY_SIZE -
3529                                    HCLGE_RSS_HASH_KEY_NUM * 2;
3530                 else
3531                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3532
3533                 memcpy(req->hash_key,
3534                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3535
3536                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3537                 if (ret) {
3538                         dev_err(&hdev->pdev->dev,
3539                                 "Configure RSS config fail, status = %d\n",
3540                                 ret);
3541                         return ret;
3542                 }
3543         }
3544         return 0;
3545 }
3546
3547 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3548 {
3549         struct hclge_rss_indirection_table_cmd *req;
3550         struct hclge_desc desc;
3551         int i, j;
3552         int ret;
3553
3554         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3555
3556         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3557                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3558                                            false);
3559
3560                 req->start_table_index =
3561                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3562                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3563
3564                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3565                         req->rss_result[j] =
3566                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3567
3568                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3569                 if (ret) {
3570                         dev_err(&hdev->pdev->dev,
3571                                 "Configure rss indir table fail,status = %d\n",
3572                                 ret);
3573                         return ret;
3574                 }
3575         }
3576         return 0;
3577 }
3578
3579 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3580                                  u16 *tc_size, u16 *tc_offset)
3581 {
3582         struct hclge_rss_tc_mode_cmd *req;
3583         struct hclge_desc desc;
3584         int ret;
3585         int i;
3586
3587         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3588         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3589
3590         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3591                 u16 mode = 0;
3592
3593                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3594                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3595                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3596                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3597                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3598
3599                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3600         }
3601
3602         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3603         if (ret)
3604                 dev_err(&hdev->pdev->dev,
3605                         "Configure rss tc mode fail, status = %d\n", ret);
3606
3607         return ret;
3608 }
3609
3610 static void hclge_get_rss_type(struct hclge_vport *vport)
3611 {
3612         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3613             vport->rss_tuple_sets.ipv4_udp_en ||
3614             vport->rss_tuple_sets.ipv4_sctp_en ||
3615             vport->rss_tuple_sets.ipv6_tcp_en ||
3616             vport->rss_tuple_sets.ipv6_udp_en ||
3617             vport->rss_tuple_sets.ipv6_sctp_en)
3618                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3619         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3620                  vport->rss_tuple_sets.ipv6_fragment_en)
3621                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3622         else
3623                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3624 }
3625
3626 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3627 {
3628         struct hclge_rss_input_tuple_cmd *req;
3629         struct hclge_desc desc;
3630         int ret;
3631
3632         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3633
3634         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3635
3636         /* Get the tuple cfg from pf */
3637         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3638         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3639         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3640         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3641         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3642         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3643         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3644         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3645         hclge_get_rss_type(&hdev->vport[0]);
3646         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3647         if (ret)
3648                 dev_err(&hdev->pdev->dev,
3649                         "Configure rss input fail, status = %d\n", ret);
3650         return ret;
3651 }
3652
3653 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3654                          u8 *key, u8 *hfunc)
3655 {
3656         struct hclge_vport *vport = hclge_get_vport(handle);
3657         int i;
3658
3659         /* Get hash algorithm */
3660         if (hfunc) {
3661                 switch (vport->rss_algo) {
3662                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3663                         *hfunc = ETH_RSS_HASH_TOP;
3664                         break;
3665                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3666                         *hfunc = ETH_RSS_HASH_XOR;
3667                         break;
3668                 default:
3669                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3670                         break;
3671                 }
3672         }
3673
3674         /* Get the RSS Key required by the user */
3675         if (key)
3676                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3677
3678         /* Get indirect table */
3679         if (indir)
3680                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3681                         indir[i] =  vport->rss_indirection_tbl[i];
3682
3683         return 0;
3684 }
3685
3686 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3687                          const  u8 *key, const  u8 hfunc)
3688 {
3689         struct hclge_vport *vport = hclge_get_vport(handle);
3690         struct hclge_dev *hdev = vport->back;
3691         u8 hash_algo;
3692         int ret, i;
3693
3694         /* Set the RSS Hash Key if specified by the user */
3695         if (key) {
3696                 switch (hfunc) {
3697                 case ETH_RSS_HASH_TOP:
3698                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3699                         break;
3700                 case ETH_RSS_HASH_XOR:
3701                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3702                         break;
3703                 case ETH_RSS_HASH_NO_CHANGE:
3704                         hash_algo = vport->rss_algo;
3705                         break;
3706                 default:
3707                         return -EINVAL;
3708                 }
3709
3710                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3711                 if (ret)
3712                         return ret;
3713
3714                 /* Update the shadow RSS key with the user specified key */
3715                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3716                 vport->rss_algo = hash_algo;
3717         }
3718
3719         /* Update the shadow RSS table with user specified qids */
3720         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3721                 vport->rss_indirection_tbl[i] = indir[i];
3722
3723         /* Update the hardware */
3724         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3725 }
3726
3727 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3728 {
3729         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3730
3731         if (nfc->data & RXH_L4_B_2_3)
3732                 hash_sets |= HCLGE_D_PORT_BIT;
3733         else
3734                 hash_sets &= ~HCLGE_D_PORT_BIT;
3735
3736         if (nfc->data & RXH_IP_SRC)
3737                 hash_sets |= HCLGE_S_IP_BIT;
3738         else
3739                 hash_sets &= ~HCLGE_S_IP_BIT;
3740
3741         if (nfc->data & RXH_IP_DST)
3742                 hash_sets |= HCLGE_D_IP_BIT;
3743         else
3744                 hash_sets &= ~HCLGE_D_IP_BIT;
3745
3746         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3747                 hash_sets |= HCLGE_V_TAG_BIT;
3748
3749         return hash_sets;
3750 }
3751
3752 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3753                                struct ethtool_rxnfc *nfc)
3754 {
3755         struct hclge_vport *vport = hclge_get_vport(handle);
3756         struct hclge_dev *hdev = vport->back;
3757         struct hclge_rss_input_tuple_cmd *req;
3758         struct hclge_desc desc;
3759         u8 tuple_sets;
3760         int ret;
3761
3762         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3763                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3764                 return -EINVAL;
3765
3766         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3767         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3768
3769         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3770         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3771         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3772         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3773         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3774         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3775         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3776         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3777
3778         tuple_sets = hclge_get_rss_hash_bits(nfc);
3779         switch (nfc->flow_type) {
3780         case TCP_V4_FLOW:
3781                 req->ipv4_tcp_en = tuple_sets;
3782                 break;
3783         case TCP_V6_FLOW:
3784                 req->ipv6_tcp_en = tuple_sets;
3785                 break;
3786         case UDP_V4_FLOW:
3787                 req->ipv4_udp_en = tuple_sets;
3788                 break;
3789         case UDP_V6_FLOW:
3790                 req->ipv6_udp_en = tuple_sets;
3791                 break;
3792         case SCTP_V4_FLOW:
3793                 req->ipv4_sctp_en = tuple_sets;
3794                 break;
3795         case SCTP_V6_FLOW:
3796                 if ((nfc->data & RXH_L4_B_0_1) ||
3797                     (nfc->data & RXH_L4_B_2_3))
3798                         return -EINVAL;
3799
3800                 req->ipv6_sctp_en = tuple_sets;
3801                 break;
3802         case IPV4_FLOW:
3803                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3804                 break;
3805         case IPV6_FLOW:
3806                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3807                 break;
3808         default:
3809                 return -EINVAL;
3810         }
3811
3812         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3813         if (ret) {
3814                 dev_err(&hdev->pdev->dev,
3815                         "Set rss tuple fail, status = %d\n", ret);
3816                 return ret;
3817         }
3818
3819         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3820         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3821         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3822         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3823         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3824         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3825         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3826         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3827         hclge_get_rss_type(vport);
3828         return 0;
3829 }
3830
3831 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3832                                struct ethtool_rxnfc *nfc)
3833 {
3834         struct hclge_vport *vport = hclge_get_vport(handle);
3835         u8 tuple_sets;
3836
3837         nfc->data = 0;
3838
3839         switch (nfc->flow_type) {
3840         case TCP_V4_FLOW:
3841                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3842                 break;
3843         case UDP_V4_FLOW:
3844                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3845                 break;
3846         case TCP_V6_FLOW:
3847                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3848                 break;
3849         case UDP_V6_FLOW:
3850                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3851                 break;
3852         case SCTP_V4_FLOW:
3853                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3854                 break;
3855         case SCTP_V6_FLOW:
3856                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3857                 break;
3858         case IPV4_FLOW:
3859         case IPV6_FLOW:
3860                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3861                 break;
3862         default:
3863                 return -EINVAL;
3864         }
3865
3866         if (!tuple_sets)
3867                 return 0;
3868
3869         if (tuple_sets & HCLGE_D_PORT_BIT)
3870                 nfc->data |= RXH_L4_B_2_3;
3871         if (tuple_sets & HCLGE_S_PORT_BIT)
3872                 nfc->data |= RXH_L4_B_0_1;
3873         if (tuple_sets & HCLGE_D_IP_BIT)
3874                 nfc->data |= RXH_IP_DST;
3875         if (tuple_sets & HCLGE_S_IP_BIT)
3876                 nfc->data |= RXH_IP_SRC;
3877
3878         return 0;
3879 }
3880
3881 static int hclge_get_tc_size(struct hnae3_handle *handle)
3882 {
3883         struct hclge_vport *vport = hclge_get_vport(handle);
3884         struct hclge_dev *hdev = vport->back;
3885
3886         return hdev->rss_size_max;
3887 }
3888
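/* Restore the complete RSS configuration (indirection table, hash key and
 * algorithm, input tuples and per-TC mode) from the shadow copies kept in
 * vport 0.
 */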
3889 int hclge_rss_init_hw(struct hclge_dev *hdev)
3890 {
3891         struct hclge_vport *vport = hdev->vport;
3892         u8 *rss_indir = vport[0].rss_indirection_tbl;
3893         u16 rss_size = vport[0].alloc_rss_size;
3894         u8 *key = vport[0].rss_hash_key;
3895         u8 hfunc = vport[0].rss_algo;
3896         u16 tc_offset[HCLGE_MAX_TC_NUM];
3897         u16 tc_valid[HCLGE_MAX_TC_NUM];
3898         u16 tc_size[HCLGE_MAX_TC_NUM];
3899         u16 roundup_size;
3900         int i, ret;
3901
3902         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3903         if (ret)
3904                 return ret;
3905
3906         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3907         if (ret)
3908                 return ret;
3909
3910         ret = hclge_set_rss_input_tuple(hdev);
3911         if (ret)
3912                 return ret;
3913
3914         /* Each TC has the same queue size, and the tc_size set to hardware is
3915          * the log2 of the roundup power of two of rss_size; the actual queue
3916          * size is limited by the indirection table.
3917          */
3918         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3919                 dev_err(&hdev->pdev->dev,
3920                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3921                         rss_size);
3922                 return -EINVAL;
3923         }
3924
3925         roundup_size = roundup_pow_of_two(rss_size);
3926         roundup_size = ilog2(roundup_size);
3927
3928         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3929                 tc_valid[i] = 0;
3930
3931                 if (!(hdev->hw_tc_map & BIT(i)))
3932                         continue;
3933
3934                 tc_valid[i] = 1;
3935                 tc_size[i] = roundup_size;
3936                 tc_offset[i] = rss_size * i;
3937         }
3938
3939         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3940 }
3941
3942 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3943 {
3944         struct hclge_vport *vport = hdev->vport;
3945         int i, j;
3946
3947         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3948                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3949                         vport[j].rss_indirection_tbl[i] =
3950                                 i % vport[j].alloc_rss_size;
3951         }
3952 }
3953
3954 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3955 {
3956         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3957         struct hclge_vport *vport = hdev->vport;
3958
3959         if (hdev->pdev->revision >= 0x21)
3960                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3961
3962         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3963                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3964                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3965                 vport[i].rss_tuple_sets.ipv4_udp_en =
3966                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3967                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3968                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3969                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3970                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3971                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3972                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3973                 vport[i].rss_tuple_sets.ipv6_udp_en =
3974                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3975                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3976                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3977                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3978                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3979
3980                 vport[i].rss_algo = rss_algo;
3981
3982                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3983                        HCLGE_RSS_KEY_SIZE);
3984         }
3985
3986         hclge_rss_indir_init_cfg(hdev);
3987 }
3988
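/* Map (en == true) or unmap (en == false) the rings in @ring_chain to the
 * given interrupt vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries per command descriptor.
 */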
3989 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3990                                 int vector_id, bool en,
3991                                 struct hnae3_ring_chain_node *ring_chain)
3992 {
3993         struct hclge_dev *hdev = vport->back;
3994         struct hnae3_ring_chain_node *node;
3995         struct hclge_desc desc;
3996         struct hclge_ctrl_vector_chain_cmd *req
3997                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3998         enum hclge_cmd_status status;
3999         enum hclge_opcode_type op;
4000         u16 tqp_type_and_id;
4001         int i;
4002
4003         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4004         hclge_cmd_setup_basic_desc(&desc, op, false);
4005         req->int_vector_id = vector_id;
4006
4007         i = 0;
4008         for (node = ring_chain; node; node = node->next) {
4009                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4010                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4011                                 HCLGE_INT_TYPE_S,
4012                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4013                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4014                                 HCLGE_TQP_ID_S, node->tqp_index);
4015                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4016                                 HCLGE_INT_GL_IDX_S,
4017                                 hnae3_get_field(node->int_gl_idx,
4018                                                 HNAE3_RING_GL_IDX_M,
4019                                                 HNAE3_RING_GL_IDX_S));
4020                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4021                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4022                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4023                         req->vfid = vport->vport_id;
4024
4025                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4026                         if (status) {
4027                                 dev_err(&hdev->pdev->dev,
4028                                         "Map TQP fail, status is %d.\n",
4029                                         status);
4030                                 return -EIO;
4031                         }
4032                         i = 0;
4033
4034                         hclge_cmd_setup_basic_desc(&desc,
4035                                                    op,
4036                                                    false);
4037                         req->int_vector_id = vector_id;
4038                 }
4039         }
4040
4041         if (i > 0) {
4042                 req->int_cause_num = i;
4043                 req->vfid = vport->vport_id;
4044                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4045                 if (status) {
4046                         dev_err(&hdev->pdev->dev,
4047                                 "Map TQP fail, status is %d.\n", status);
4048                         return -EIO;
4049                 }
4050         }
4051
4052         return 0;
4053 }
4054
4055 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4056                                     int vector,
4057                                     struct hnae3_ring_chain_node *ring_chain)
4058 {
4059         struct hclge_vport *vport = hclge_get_vport(handle);
4060         struct hclge_dev *hdev = vport->back;
4061         int vector_id;
4062
4063         vector_id = hclge_get_vector_index(hdev, vector);
4064         if (vector_id < 0) {
4065                 dev_err(&hdev->pdev->dev,
4066                         "Get vector index fail. vector_id =%d\n", vector_id);
4067                 return vector_id;
4068         }
4069
4070         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4071 }
4072
4073 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4074                                        int vector,
4075                                        struct hnae3_ring_chain_node *ring_chain)
4076 {
4077         struct hclge_vport *vport = hclge_get_vport(handle);
4078         struct hclge_dev *hdev = vport->back;
4079         int vector_id, ret;
4080
4081         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4082                 return 0;
4083
4084         vector_id = hclge_get_vector_index(hdev, vector);
4085         if (vector_id < 0) {
4086                 dev_err(&handle->pdev->dev,
4087                         "Get vector index fail. ret =%d\n", vector_id);
4088                 return vector_id;
4089         }
4090
4091         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4092         if (ret)
4093                 dev_err(&handle->pdev->dev,
4094                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4095                         vector_id,
4096                         ret);
4097
4098         return ret;
4099 }
4100
4101 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4102                                struct hclge_promisc_param *param)
4103 {
4104         struct hclge_promisc_cfg_cmd *req;
4105         struct hclge_desc desc;
4106         int ret;
4107
4108         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4109
4110         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4111         req->vf_id = param->vf_id;
4112
4113         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4114          * pdev revision 0x20; newer revisions support them. Setting these
4115          * two fields does not return an error when the driver sends the
4116          * command to the firmware on revision 0x20.
4117          */
4118         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4119                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4120
4121         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4122         if (ret)
4123                 dev_err(&hdev->pdev->dev,
4124                         "Set promisc mode fail, status is %d.\n", ret);
4125
4126         return ret;
4127 }
4128
4129 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4130                               bool en_mc, bool en_bc, int vport_id)
4131 {
4132         if (!param)
4133                 return;
4134
4135         memset(param, 0, sizeof(struct hclge_promisc_param));
4136         if (en_uc)
4137                 param->enable = HCLGE_PROMISC_EN_UC;
4138         if (en_mc)
4139                 param->enable |= HCLGE_PROMISC_EN_MC;
4140         if (en_bc)
4141                 param->enable |= HCLGE_PROMISC_EN_BC;
4142         param->vf_id = vport_id;
4143 }
4144
4145 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4146                                   bool en_mc_pmc)
4147 {
4148         struct hclge_vport *vport = hclge_get_vport(handle);
4149         struct hclge_dev *hdev = vport->back;
4150         struct hclge_promisc_param param;
4151         bool en_bc_pmc = true;
4152
4153         /* For revision 0x20, if broadcast promisc enabled, vlan filter is
4154          * always bypassed. So broadcast promisc should be disabled until
4155          * user enable promisc mode
4156          */
4157         if (handle->pdev->revision == 0x20)
4158                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4159
4160         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4161                                  vport->vport_id);
4162         return hclge_cmd_set_promisc_mode(hdev, &param);
4163 }
4164
4165 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4166 {
4167         struct hclge_get_fd_mode_cmd *req;
4168         struct hclge_desc desc;
4169         int ret;
4170
4171         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4172
4173         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4174
4175         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4176         if (ret) {
4177                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4178                 return ret;
4179         }
4180
4181         *fd_mode = req->mode;
4182
4183         return ret;
4184 }
4185
4186 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4187                                    u32 *stage1_entry_num,
4188                                    u32 *stage2_entry_num,
4189                                    u16 *stage1_counter_num,
4190                                    u16 *stage2_counter_num)
4191 {
4192         struct hclge_get_fd_allocation_cmd *req;
4193         struct hclge_desc desc;
4194         int ret;
4195
4196         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4197
4198         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4199
4200         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4201         if (ret) {
4202                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4203                         ret);
4204                 return ret;
4205         }
4206
4207         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4208         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4209         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4210         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4211
4212         return ret;
4213 }
4214
4215 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4216 {
4217         struct hclge_set_fd_key_config_cmd *req;
4218         struct hclge_fd_key_cfg *stage;
4219         struct hclge_desc desc;
4220         int ret;
4221
4222         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4223
4224         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4225         stage = &hdev->fd_cfg.key_cfg[stage_num];
4226         req->stage = stage_num;
4227         req->key_select = stage->key_sel;
4228         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4229         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4230         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4231         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4232         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4233         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4234
4235         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4236         if (ret)
4237                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4238
4239         return ret;
4240 }
4241
4242 static int hclge_init_fd_config(struct hclge_dev *hdev)
4243 {
4244 #define LOW_2_WORDS             0x03
4245         struct hclge_fd_key_cfg *key_cfg;
4246         int ret;
4247
4248         if (!hnae3_dev_fd_supported(hdev))
4249                 return 0;
4250
4251         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4252         if (ret)
4253                 return ret;
4254
4255         switch (hdev->fd_cfg.fd_mode) {
4256         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4257                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4258                 break;
4259         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4260                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4261                 break;
4262         default:
4263                 dev_err(&hdev->pdev->dev,
4264                         "Unsupported flow director mode %d\n",
4265                         hdev->fd_cfg.fd_mode);
4266                 return -EOPNOTSUPP;
4267         }
4268
4269         hdev->fd_cfg.proto_support =
4270                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4271                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4272         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4273         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4274         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4275         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4276         key_cfg->outer_sipv6_word_en = 0;
4277         key_cfg->outer_dipv6_word_en = 0;
4278
4279         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4280                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4281                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4282                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4283
4284         /* With the max 400-bit key, ether type tuples are also supported */
4285         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4286                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4287                 key_cfg->tuple_active |=
4288                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4289         }
4290
4291         /* roce_type is used to filter roce frames
4292          * dst_vport is used to specify the rule
4293          */
4294         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4295
4296         ret = hclge_get_fd_allocation(hdev,
4297                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4298                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4299                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4300                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4301         if (ret)
4302                 return ret;
4303
4304         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4305 }
4306
4307 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4308                                 int loc, u8 *key, bool is_add)
4309 {
4310         struct hclge_fd_tcam_config_1_cmd *req1;
4311         struct hclge_fd_tcam_config_2_cmd *req2;
4312         struct hclge_fd_tcam_config_3_cmd *req3;
4313         struct hclge_desc desc[3];
4314         int ret;
4315
4316         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4317         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4318         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4319         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4320         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4321
4322         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4323         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4324         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4325
4326         req1->stage = stage;
4327         req1->xy_sel = sel_x ? 1 : 0;
4328         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4329         req1->index = cpu_to_le32(loc);
4330         req1->entry_vld = sel_x ? is_add : 0;
4331
4332         if (key) {
4333                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4334                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4335                        sizeof(req2->tcam_data));
4336                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4337                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4338         }
4339
4340         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4341         if (ret)
4342                 dev_err(&hdev->pdev->dev,
4343                         "config tcam key fail, ret=%d\n",
4344                         ret);
4345
4346         return ret;
4347 }
4348
4349 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4350                               struct hclge_fd_ad_data *action)
4351 {
4352         struct hclge_fd_ad_config_cmd *req;
4353         struct hclge_desc desc;
4354         u64 ad_data = 0;
4355         int ret;
4356
4357         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4358
4359         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4360         req->index = cpu_to_le32(loc);
4361         req->stage = stage;
4362
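        /* The write-rule-id flag and rule id set below are shifted into the
         * upper 32 bits of ad_data; the action fields (drop, queue, counter,
         * next stage) are filled in afterwards in the lower bits.
         */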
4363         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4364                       action->write_rule_id_to_bd);
4365         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4366                         action->rule_id);
4367         ad_data <<= 32;
4368         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4369         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4370                       action->forward_to_direct_queue);
4371         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4372                         action->queue_id);
4373         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4374         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4375                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4376         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4377         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4378                         action->next_input_key);
4379
4380         req->ad_data = cpu_to_le64(ad_data);
4381         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4382         if (ret)
4383                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4384
4385         return ret;
4386 }
4387
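/* Convert one tuple of the rule into its TCAM x/y key representation.
 * calc_x()/calc_y() take the tuple value and its mask and produce the pair
 * of key bytes the hardware matches on; tuples listed in unused_tuple are
 * skipped and left zeroed in the key buffer. Note that MAC addresses are
 * written byte-reversed (key[5 - i]) and IPv4 addresses live in word 3 of
 * the 4-word IP arrays.
 */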
4388 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4389                                    struct hclge_fd_rule *rule)
4390 {
4391         u16 tmp_x_s, tmp_y_s;
4392         u32 tmp_x_l, tmp_y_l;
4393         int i;
4394
4395         if (rule->unused_tuple & tuple_bit)
4396                 return true;
4397
4398         switch (tuple_bit) {
4399         case 0:
4400                 return false;
4401         case BIT(INNER_DST_MAC):
4402                 for (i = 0; i < 6; i++) {
4403                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4404                                rule->tuples_mask.dst_mac[i]);
4405                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4406                                rule->tuples_mask.dst_mac[i]);
4407                 }
4408
4409                 return true;
4410         case BIT(INNER_SRC_MAC):
4411                 for (i = 0; i < 6; i++) {
4412                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4413                                rule->tuples_mask.src_mac[i]);
4414                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4415                                rule->tuples_mask.src_mac[i]);
4416                 }
4417
4418                 return true;
4419         case BIT(INNER_VLAN_TAG_FST):
4420                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4421                        rule->tuples_mask.vlan_tag1);
4422                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4423                        rule->tuples_mask.vlan_tag1);
4424                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4425                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4426
4427                 return true;
4428         case BIT(INNER_ETH_TYPE):
4429                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4430                        rule->tuples_mask.ether_proto);
4431                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4432                        rule->tuples_mask.ether_proto);
4433                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4434                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4435
4436                 return true;
4437         case BIT(INNER_IP_TOS):
4438                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4439                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4440
4441                 return true;
4442         case BIT(INNER_IP_PROTO):
4443                 calc_x(*key_x, rule->tuples.ip_proto,
4444                        rule->tuples_mask.ip_proto);
4445                 calc_y(*key_y, rule->tuples.ip_proto,
4446                        rule->tuples_mask.ip_proto);
4447
4448                 return true;
4449         case BIT(INNER_SRC_IP):
4450                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4451                        rule->tuples_mask.src_ip[3]);
4452                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4453                        rule->tuples_mask.src_ip[3]);
4454                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4455                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4456
4457                 return true;
4458         case BIT(INNER_DST_IP):
4459                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4460                        rule->tuples_mask.dst_ip[3]);
4461                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4462                        rule->tuples_mask.dst_ip[3]);
4463                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4464                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4465
4466                 return true;
4467         case BIT(INNER_SRC_PORT):
4468                 calc_x(tmp_x_s, rule->tuples.src_port,
4469                        rule->tuples_mask.src_port);
4470                 calc_y(tmp_y_s, rule->tuples.src_port,
4471                        rule->tuples_mask.src_port);
4472                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4473                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4474
4475                 return true;
4476         case BIT(INNER_DST_PORT):
4477                 calc_x(tmp_x_s, rule->tuples.dst_port,
4478                        rule->tuples_mask.dst_port);
4479                 calc_y(tmp_y_s, rule->tuples.dst_port,
4480                        rule->tuples_mask.dst_port);
4481                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4482                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4483
4484                 return true;
4485         default:
4486                 return false;
4487         }
4488 }
4489
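/* Build the port number used in the meta data key: a host port is encoded
 * as PF id + VF id, a network port as its port id, with HCLGE_PORT_TYPE_B
 * distinguishing the two.
 */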
4490 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4491                                  u8 vf_id, u8 network_port_id)
4492 {
4493         u32 port_number = 0;
4494
4495         if (port_type == HOST_PORT) {
4496                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4497                                 pf_id);
4498                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4499                                 vf_id);
4500                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4501         } else {
4502                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4503                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4504                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4505         }
4506
4507         return port_number;
4508 }
4509
4510 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4511                                        __le32 *key_x, __le32 *key_y,
4512                                        struct hclge_fd_rule *rule)
4513 {
4514         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4515         u8 cur_pos = 0, tuple_size, shift_bits;
4516         int i;
4517
4518         for (i = 0; i < MAX_META_DATA; i++) {
4519                 tuple_size = meta_data_key_info[i].key_length;
4520                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4521
4522                 switch (tuple_bit) {
4523                 case BIT(ROCE_TYPE):
4524                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4525                         cur_pos += tuple_size;
4526                         break;
4527                 case BIT(DST_VPORT):
4528                         port_number = hclge_get_port_number(HOST_PORT, 0,
4529                                                             rule->vf_id, 0);
4530                         hnae3_set_field(meta_data,
4531                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4532                                         cur_pos, port_number);
4533                         cur_pos += tuple_size;
4534                         break;
4535                 default:
4536                         break;
4537                 }
4538         }
4539
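        /* Left-align the packed meta data within its 32-bit region and
         * convert it to the x/y key form using an all-ones mask.
         */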
4540         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4541         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4542         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4543
4544         *key_x = cpu_to_le32(tmp_x << shift_bits);
4545         *key_y = cpu_to_le32(tmp_y << shift_bits);
4546 }
4547
4548 /* A complete key consists of a meta data key and a tuple key.
4549  * The meta data key is stored in the MSB region and the tuple key in
4550  * the LSB region; unused bits are filled with 0.
4551  */
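/* As an illustration (assuming a 400-bit max key and a 32-bit meta data
 * region): the 50-byte key buffer is filled with tuple data starting at
 * byte 0, while the last 4 bytes, from meta_data_region = 50 - 4 = 46
 * onwards, hold the converted meta data.
 */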
4552 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4553                             struct hclge_fd_rule *rule)
4554 {
4555         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4556         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4557         u8 *cur_key_x, *cur_key_y;
4558         int i, ret, tuple_size;
4559         u8 meta_data_region;
4560
4561         memset(key_x, 0, sizeof(key_x));
4562         memset(key_y, 0, sizeof(key_y));
4563         cur_key_x = key_x;
4564         cur_key_y = key_y;
4565
4566         for (i = 0; i < MAX_TUPLE; i++) {
4567                 bool tuple_valid;
4568                 u32 check_tuple;
4569
4570                 tuple_size = tuple_key_info[i].key_length / 8;
4571                 check_tuple = key_cfg->tuple_active & BIT(i);
4572
4573                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4574                                                      cur_key_y, rule);
4575                 if (tuple_valid) {
4576                         cur_key_x += tuple_size;
4577                         cur_key_y += tuple_size;
4578                 }
4579         }
4580
4581         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4582                         MAX_META_DATA_LENGTH / 8;
4583
4584         hclge_fd_convert_meta_data(key_cfg,
4585                                    (__le32 *)(key_x + meta_data_region),
4586                                    (__le32 *)(key_y + meta_data_region),
4587                                    rule);
4588
4589         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4590                                    true);
4591         if (ret) {
4592                 dev_err(&hdev->pdev->dev,
4593                         "fd key_y config fail, loc=%d, ret=%d\n",
4594                         rule->location, ret);
4595                 return ret;
4596         }
4597
4598         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4599                                    true);
4600         if (ret)
4601                 dev_err(&hdev->pdev->dev,
4602                         "fd key_x config fail, loc=%d, ret=%d\n",
4603                         rule->location, ret);
4604         return ret;
4605 }
4606
4607 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4608                                struct hclge_fd_rule *rule)
4609 {
4610         struct hclge_fd_ad_data ad_data;
4611
4612         ad_data.ad_id = rule->location;
4613
4614         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4615                 ad_data.drop_packet = true;
4616                 ad_data.forward_to_direct_queue = false;
4617                 ad_data.queue_id = 0;
4618         } else {
4619                 ad_data.drop_packet = false;
4620                 ad_data.forward_to_direct_queue = true;
4621                 ad_data.queue_id = rule->queue_id;
4622         }
4623
4624         ad_data.use_counter = false;
4625         ad_data.counter_id = 0;
4626
4627         ad_data.use_next_stage = false;
4628         ad_data.next_input_key = 0;
4629
4630         ad_data.write_rule_id_to_bd = true;
4631         ad_data.rule_id = rule->location;
4632
4633         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4634 }
4635
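/* Validate an ethtool flow spec and record in *unused a bitmap of the
 * tuples the user left unspecified, so they can be skipped when the TCAM
 * key is built.
 */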
4636 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4637                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4638 {
4639         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4640         struct ethtool_usrip4_spec *usr_ip4_spec;
4641         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4642         struct ethtool_usrip6_spec *usr_ip6_spec;
4643         struct ethhdr *ether_spec;
4644
4645         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4646                 return -EINVAL;
4647
4648         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4649                 return -EOPNOTSUPP;
4650
4651         if ((fs->flow_type & FLOW_EXT) &&
4652             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4653                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4654                 return -EOPNOTSUPP;
4655         }
4656
4657         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4658         case SCTP_V4_FLOW:
4659         case TCP_V4_FLOW:
4660         case UDP_V4_FLOW:
4661                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4662                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4663
4664                 if (!tcp_ip4_spec->ip4src)
4665                         *unused |= BIT(INNER_SRC_IP);
4666
4667                 if (!tcp_ip4_spec->ip4dst)
4668                         *unused |= BIT(INNER_DST_IP);
4669
4670                 if (!tcp_ip4_spec->psrc)
4671                         *unused |= BIT(INNER_SRC_PORT);
4672
4673                 if (!tcp_ip4_spec->pdst)
4674                         *unused |= BIT(INNER_DST_PORT);
4675
4676                 if (!tcp_ip4_spec->tos)
4677                         *unused |= BIT(INNER_IP_TOS);
4678
4679                 break;
4680         case IP_USER_FLOW:
4681                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4682                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4683                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4684
4685                 if (!usr_ip4_spec->ip4src)
4686                         *unused |= BIT(INNER_SRC_IP);
4687
4688                 if (!usr_ip4_spec->ip4dst)
4689                         *unused |= BIT(INNER_DST_IP);
4690
4691                 if (!usr_ip4_spec->tos)
4692                         *unused |= BIT(INNER_IP_TOS);
4693
4694                 if (!usr_ip4_spec->proto)
4695                         *unused |= BIT(INNER_IP_PROTO);
4696
4697                 if (usr_ip4_spec->l4_4_bytes)
4698                         return -EOPNOTSUPP;
4699
4700                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4701                         return -EOPNOTSUPP;
4702
4703                 break;
4704         case SCTP_V6_FLOW:
4705         case TCP_V6_FLOW:
4706         case UDP_V6_FLOW:
4707                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4708                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4709                         BIT(INNER_IP_TOS);
4710
4711                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4712                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4713                         *unused |= BIT(INNER_SRC_IP);
4714
4715                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4716                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4717                         *unused |= BIT(INNER_DST_IP);
4718
4719                 if (!tcp_ip6_spec->psrc)
4720                         *unused |= BIT(INNER_SRC_PORT);
4721
4722                 if (!tcp_ip6_spec->pdst)
4723                         *unused |= BIT(INNER_DST_PORT);
4724
4725                 if (tcp_ip6_spec->tclass)
4726                         return -EOPNOTSUPP;
4727
4728                 break;
4729         case IPV6_USER_FLOW:
4730                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4731                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4732                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4733                         BIT(INNER_DST_PORT);
4734
4735                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4736                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4737                         *unused |= BIT(INNER_SRC_IP);
4738
4739                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4740                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4741                         *unused |= BIT(INNER_DST_IP);
4742
4743                 if (!usr_ip6_spec->l4_proto)
4744                         *unused |= BIT(INNER_IP_PROTO);
4745
4746                 if (usr_ip6_spec->tclass)
4747                         return -EOPNOTSUPP;
4748
4749                 if (usr_ip6_spec->l4_4_bytes)
4750                         return -EOPNOTSUPP;
4751
4752                 break;
4753         case ETHER_FLOW:
4754                 ether_spec = &fs->h_u.ether_spec;
4755                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4756                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4757                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4758
4759                 if (is_zero_ether_addr(ether_spec->h_source))
4760                         *unused |= BIT(INNER_SRC_MAC);
4761
4762                 if (is_zero_ether_addr(ether_spec->h_dest))
4763                         *unused |= BIT(INNER_DST_MAC);
4764
4765                 if (!ether_spec->h_proto)
4766                         *unused |= BIT(INNER_ETH_TYPE);
4767
4768                 break;
4769         default:
4770                 return -EOPNOTSUPP;
4771         }
4772
4773         if ((fs->flow_type & FLOW_EXT)) {
4774                 if (fs->h_ext.vlan_etype)
4775                         return -EOPNOTSUPP;
4776                 if (!fs->h_ext.vlan_tci)
4777                         *unused |= BIT(INNER_VLAN_TAG_FST);
4778
4779                 if (fs->m_ext.vlan_tci) {
4780                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4781                                 return -EINVAL;
4782                 }
4783         } else {
4784                 *unused |= BIT(INNER_VLAN_TAG_FST);
4785         }
4786
4787         if (fs->flow_type & FLOW_MAC_EXT) {
4788                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4789                         return -EOPNOTSUPP;
4790
4791                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4792                         *unused |= BIT(INNER_DST_MAC);
4793                 else
4794                         *unused &= ~(BIT(INNER_DST_MAC));
4795         }
4796
4797         return 0;
4798 }
4799
4800 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4801 {
4802         struct hclge_fd_rule *rule = NULL;
4803         struct hlist_node *node2;
4804
4805         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4806                 if (rule->location >= location)
4807                         break;
4808         }
4809
4810         return rule && rule->location == location;
4811 }
4812
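/* Keep hdev->fd_rule_list sorted by rule location: any existing rule at
 * @location is removed first, then (for an add) the new rule is linked in
 * behind the last rule with a smaller location.
 */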
4813 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4814                                      struct hclge_fd_rule *new_rule,
4815                                      u16 location,
4816                                      bool is_add)
4817 {
4818         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4819         struct hlist_node *node2;
4820
4821         if (is_add && !new_rule)
4822                 return -EINVAL;
4823
4824         hlist_for_each_entry_safe(rule, node2,
4825                                   &hdev->fd_rule_list, rule_node) {
4826                 if (rule->location >= location)
4827                         break;
4828                 parent = rule;
4829         }
4830
4831         if (rule && rule->location == location) {
4832                 hlist_del(&rule->rule_node);
4833                 kfree(rule);
4834                 hdev->hclge_fd_rule_num--;
4835
4836                 if (!is_add)
4837                         return 0;
4838
4839         } else if (!is_add) {
4840                 dev_err(&hdev->pdev->dev,
4841                         "delete fail, rule %d does not exist\n",
4842                         location);
4843                 return -EINVAL;
4844         }
4845
4846         INIT_HLIST_NODE(&new_rule->rule_node);
4847
4848         if (parent)
4849                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4850         else
4851                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4852
4853         hdev->hclge_fd_rule_num++;
4854
4855         return 0;
4856 }
4857
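/* Translate an ethtool flow spec into the driver's tuple representation.
 * IPv4 addresses are stored in word 3 of the 4-word IP arrays so that IPv4
 * and IPv6 rules share the same layout.
 */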
4858 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4859                               struct ethtool_rx_flow_spec *fs,
4860                               struct hclge_fd_rule *rule)
4861 {
4862         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4863
4864         switch (flow_type) {
4865         case SCTP_V4_FLOW:
4866         case TCP_V4_FLOW:
4867         case UDP_V4_FLOW:
4868                 rule->tuples.src_ip[3] =
4869                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4870                 rule->tuples_mask.src_ip[3] =
4871                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4872
4873                 rule->tuples.dst_ip[3] =
4874                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4875                 rule->tuples_mask.dst_ip[3] =
4876                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4877
4878                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4879                 rule->tuples_mask.src_port =
4880                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4881
4882                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4883                 rule->tuples_mask.dst_port =
4884                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4885
4886                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4887                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4888
4889                 rule->tuples.ether_proto = ETH_P_IP;
4890                 rule->tuples_mask.ether_proto = 0xFFFF;
4891
4892                 break;
4893         case IP_USER_FLOW:
4894                 rule->tuples.src_ip[3] =
4895                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4896                 rule->tuples_mask.src_ip[3] =
4897                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4898
4899                 rule->tuples.dst_ip[3] =
4900                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4901                 rule->tuples_mask.dst_ip[3] =
4902                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4903
4904                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4905                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4906
4907                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4908                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4909
4910                 rule->tuples.ether_proto = ETH_P_IP;
4911                 rule->tuples_mask.ether_proto = 0xFFFF;
4912
4913                 break;
4914         case SCTP_V6_FLOW:
4915         case TCP_V6_FLOW:
4916         case UDP_V6_FLOW:
4917                 be32_to_cpu_array(rule->tuples.src_ip,
4918                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4919                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4920                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4921
4922                 be32_to_cpu_array(rule->tuples.dst_ip,
4923                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4924                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4925                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4926
4927                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4928                 rule->tuples_mask.src_port =
4929                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4930
4931                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4932                 rule->tuples_mask.dst_port =
4933                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4934
4935                 rule->tuples.ether_proto = ETH_P_IPV6;
4936                 rule->tuples_mask.ether_proto = 0xFFFF;
4937
4938                 break;
4939         case IPV6_USER_FLOW:
4940                 be32_to_cpu_array(rule->tuples.src_ip,
4941                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4942                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4943                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4944
4945                 be32_to_cpu_array(rule->tuples.dst_ip,
4946                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4947                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4948                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4949
4950                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4951                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4952
4953                 rule->tuples.ether_proto = ETH_P_IPV6;
4954                 rule->tuples_mask.ether_proto = 0xFFFF;
4955
4956                 break;
4957         case ETHER_FLOW:
4958                 ether_addr_copy(rule->tuples.src_mac,
4959                                 fs->h_u.ether_spec.h_source);
4960                 ether_addr_copy(rule->tuples_mask.src_mac,
4961                                 fs->m_u.ether_spec.h_source);
4962
4963                 ether_addr_copy(rule->tuples.dst_mac,
4964                                 fs->h_u.ether_spec.h_dest);
4965                 ether_addr_copy(rule->tuples_mask.dst_mac,
4966                                 fs->m_u.ether_spec.h_dest);
4967
4968                 rule->tuples.ether_proto =
4969                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4970                 rule->tuples_mask.ether_proto =
4971                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4972
4973                 break;
4974         default:
4975                 return -EOPNOTSUPP;
4976         }
4977
4978         switch (flow_type) {
4979         case SCTP_V4_FLOW:
4980         case SCTP_V6_FLOW:
4981                 rule->tuples.ip_proto = IPPROTO_SCTP;
4982                 rule->tuples_mask.ip_proto = 0xFF;
4983                 break;
4984         case TCP_V4_FLOW:
4985         case TCP_V6_FLOW:
4986                 rule->tuples.ip_proto = IPPROTO_TCP;
4987                 rule->tuples_mask.ip_proto = 0xFF;
4988                 break;
4989         case UDP_V4_FLOW:
4990         case UDP_V6_FLOW:
4991                 rule->tuples.ip_proto = IPPROTO_UDP;
4992                 rule->tuples_mask.ip_proto = 0xFF;
4993                 break;
4994         default:
4995                 break;
4996         }
4997
4998         if ((fs->flow_type & FLOW_EXT)) {
4999                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5000                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5001         }
5002
5003         if (fs->flow_type & FLOW_MAC_EXT) {
5004                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5005                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5006         }
5007
5008         return 0;
5009 }
5010
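/* Entry point for adding a flow director rule via ethtool -N, e.g. (with
 * purely illustrative values):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *           action 3 loc 1
 * A ring_cookie of RX_CLS_FLOW_DISC (ethtool "action -1") drops matching
 * packets instead of steering them to a queue.
 */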
5011 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5012                               struct ethtool_rxnfc *cmd)
5013 {
5014         struct hclge_vport *vport = hclge_get_vport(handle);
5015         struct hclge_dev *hdev = vport->back;
5016         u16 dst_vport_id = 0, q_index = 0;
5017         struct ethtool_rx_flow_spec *fs;
5018         struct hclge_fd_rule *rule;
5019         u32 unused = 0;
5020         u8 action;
5021         int ret;
5022
5023         if (!hnae3_dev_fd_supported(hdev))
5024                 return -EOPNOTSUPP;
5025
5026         if (!hdev->fd_en) {
5027                 dev_warn(&hdev->pdev->dev,
5028                          "Please enable flow director first\n");
5029                 return -EOPNOTSUPP;
5030         }
5031
5032         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5033
5034         ret = hclge_fd_check_spec(hdev, fs, &unused);
5035         if (ret) {
5036                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5037                 return ret;
5038         }
5039
5040         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5041                 action = HCLGE_FD_ACTION_DROP_PACKET;
5042         } else {
5043                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5044                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5045                 u16 tqps;
5046
5047                 if (vf > hdev->num_req_vfs) {
5048                         dev_err(&hdev->pdev->dev,
5049                                 "Error: vf id (%d) > max vf num (%d)\n",
5050                                 vf, hdev->num_req_vfs);
5051                         return -EINVAL;
5052                 }
5053
5054                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5055                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5056
5057                 if (ring >= tqps) {
5058                         dev_err(&hdev->pdev->dev,
5059                                 "Error: queue id (%d) > max tqp num (%d)\n",
5060                                 ring, tqps - 1);
5061                         return -EINVAL;
5062                 }
5063
5064                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5065                 q_index = ring;
5066         }
5067
5068         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5069         if (!rule)
5070                 return -ENOMEM;
5071
5072         ret = hclge_fd_get_tuple(hdev, fs, rule);
5073         if (ret)
5074                 goto free_rule;
5075
5076         rule->flow_type = fs->flow_type;
5077
5078         rule->location = fs->location;
5079         rule->unused_tuple = unused;
5080         rule->vf_id = dst_vport_id;
5081         rule->queue_id = q_index;
5082         rule->action = action;
5083
5084         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5085         if (ret)
5086                 goto free_rule;
5087
5088         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5089         if (ret)
5090                 goto free_rule;
5091
5092         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5093         if (ret)
5094                 goto free_rule;
5095
5096         return ret;
5097
5098 free_rule:
5099         kfree(rule);
5100         return ret;
5101 }
5102
5103 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5104                               struct ethtool_rxnfc *cmd)
5105 {
5106         struct hclge_vport *vport = hclge_get_vport(handle);
5107         struct hclge_dev *hdev = vport->back;
5108         struct ethtool_rx_flow_spec *fs;
5109         int ret;
5110
5111         if (!hnae3_dev_fd_supported(hdev))
5112                 return -EOPNOTSUPP;
5113
5114         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5115
5116         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5117                 return -EINVAL;
5118
5119         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5120                 dev_err(&hdev->pdev->dev,
5121                         "Delete fail, rule %d does not exist\n",
5122                         fs->location);
5123                 return -ENOENT;
5124         }
5125
5126         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5127                                    fs->location, NULL, false);
5128         if (ret)
5129                 return ret;
5130
5131         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5132                                          false);
5133 }
5134
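/* Remove all flow director rules from hardware. When @clear_list is true
 * the software rule list is freed as well; otherwise it is kept so the
 * rules can be restored later (see hclge_restore_fd_entries()).
 */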
5135 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5136                                      bool clear_list)
5137 {
5138         struct hclge_vport *vport = hclge_get_vport(handle);
5139         struct hclge_dev *hdev = vport->back;
5140         struct hclge_fd_rule *rule;
5141         struct hlist_node *node;
5142
5143         if (!hnae3_dev_fd_supported(hdev))
5144                 return;
5145
5146         if (clear_list) {
5147                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5148                                           rule_node) {
5149                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5150                                              rule->location, NULL, false);
5151                         hlist_del(&rule->rule_node);
5152                         kfree(rule);
5153                         hdev->hclge_fd_rule_num--;
5154                 }
5155         } else {
5156                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5157                                           rule_node)
5158                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5159                                              rule->location, NULL, false);
5160         }
5161 }
5162
5163 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5164 {
5165         struct hclge_vport *vport = hclge_get_vport(handle);
5166         struct hclge_dev *hdev = vport->back;
5167         struct hclge_fd_rule *rule;
5168         struct hlist_node *node;
5169         int ret;
5170
5171         /* Return ok here, because reset error handling will check this
5172          * return value. If error is returned here, the reset process will
5173          * fail.
5174          */
5175         if (!hnae3_dev_fd_supported(hdev))
5176                 return 0;
5177
5178         /* if fd is disabled, the rules should not be restored during reset */
5179         if (!hdev->fd_en)
5180                 return 0;
5181
5182         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5183                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5184                 if (!ret)
5185                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5186
5187                 if (ret) {
5188                         dev_warn(&hdev->pdev->dev,
5189                                  "Restore rule %d failed, remove it\n",
5190                                  rule->location);
5191                         hlist_del(&rule->rule_node);
5192                         kfree(rule);
5193                         hdev->hclge_fd_rule_num--;
5194                 }
5195         }
5196         return 0;
5197 }
5198
5199 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5200                                  struct ethtool_rxnfc *cmd)
5201 {
5202         struct hclge_vport *vport = hclge_get_vport(handle);
5203         struct hclge_dev *hdev = vport->back;
5204
5205         if (!hnae3_dev_fd_supported(hdev))
5206                 return -EOPNOTSUPP;
5207
5208         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5209         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5210
5211         return 0;
5212 }
5213
5214 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5215                                   struct ethtool_rxnfc *cmd)
5216 {
5217         struct hclge_vport *vport = hclge_get_vport(handle);
5218         struct hclge_fd_rule *rule = NULL;
5219         struct hclge_dev *hdev = vport->back;
5220         struct ethtool_rx_flow_spec *fs;
5221         struct hlist_node *node2;
5222
5223         if (!hnae3_dev_fd_supported(hdev))
5224                 return -EOPNOTSUPP;
5225
5226         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5227
5228         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5229                 if (rule->location >= fs->location)
5230                         break;
5231         }
5232
5233         if (!rule || fs->location != rule->location)
5234                 return -ENOENT;
5235
5236         fs->flow_type = rule->flow_type;
5237         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5238         case SCTP_V4_FLOW:
5239         case TCP_V4_FLOW:
5240         case UDP_V4_FLOW:
5241                 fs->h_u.tcp_ip4_spec.ip4src =
5242                                 cpu_to_be32(rule->tuples.src_ip[3]);
5243                 fs->m_u.tcp_ip4_spec.ip4src =
5244                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5245                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5246
5247                 fs->h_u.tcp_ip4_spec.ip4dst =
5248                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5249                 fs->m_u.tcp_ip4_spec.ip4dst =
5250                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5251                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5252
5253                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5254                 fs->m_u.tcp_ip4_spec.psrc =
5255                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5256                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5257
5258                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5259                 fs->m_u.tcp_ip4_spec.pdst =
5260                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5261                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5262
5263                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5264                 fs->m_u.tcp_ip4_spec.tos =
5265                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5266                                 0 : rule->tuples_mask.ip_tos;
5267
5268                 break;
5269         case IP_USER_FLOW:
5270                 fs->h_u.usr_ip4_spec.ip4src =
5271                                 cpu_to_be32(rule->tuples.src_ip[3]);
5272                 fs->m_u.usr_ip4_spec.ip4src =
5273                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5274                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5275
5276                 fs->h_u.usr_ip4_spec.ip4dst =
5277                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5278                 fs->m_u.usr_ip4_spec.ip4dst =
5279                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5280                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5281
5282                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5283                 fs->m_u.usr_ip4_spec.tos =
5284                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5285                                 0 : rule->tuples_mask.ip_tos;
5286
5287                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5288                 fs->m_u.usr_ip4_spec.proto =
5289                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5290                                 0 : rule->tuples_mask.ip_proto;
5291
5292                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5293
5294                 break;
5295         case SCTP_V6_FLOW:
5296         case TCP_V6_FLOW:
5297         case UDP_V6_FLOW:
5298                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5299                                   rule->tuples.src_ip, 4);
5300                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5301                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5302                 else
5303                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5304                                           rule->tuples_mask.src_ip, 4);
5305
5306                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5307                                   rule->tuples.dst_ip, 4);
5308                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5309                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5310                 else
5311                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5312                                           rule->tuples_mask.dst_ip, 4);
5313
5314                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5315                 fs->m_u.tcp_ip6_spec.psrc =
5316                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5317                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5318
5319                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5320                 fs->m_u.tcp_ip6_spec.pdst =
5321                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5322                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5323
5324                 break;
5325         case IPV6_USER_FLOW:
5326                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5327                                   rule->tuples.src_ip, 4);
5328                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5329                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5330                 else
5331                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5332                                           rule->tuples_mask.src_ip, 4);
5333
5334                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5335                                   rule->tuples.dst_ip, 4);
5336                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5337                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5338                 else
5339                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5340                                           rule->tuples_mask.dst_ip, 4);
5341
5342                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5343                 fs->m_u.usr_ip6_spec.l4_proto =
5344                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5345                                 0 : rule->tuples_mask.ip_proto;
5346
5347                 break;
5348         case ETHER_FLOW:
5349                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5350                                 rule->tuples.src_mac);
5351                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5352                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5353                 else
5354                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5355                                         rule->tuples_mask.src_mac);
5356
5357                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5358                                 rule->tuples.dst_mac);
5359                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5360                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5361                 else
5362                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5363                                         rule->tuples_mask.dst_mac);
5364
5365                 fs->h_u.ether_spec.h_proto =
5366                                 cpu_to_be16(rule->tuples.ether_proto);
5367                 fs->m_u.ether_spec.h_proto =
5368                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5369                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5370
5371                 break;
5372         default:
5373                 return -EOPNOTSUPP;
5374         }
5375
5376         if (fs->flow_type & FLOW_EXT) {
5377                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5378                 fs->m_ext.vlan_tci =
5379                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5380                                 cpu_to_be16(VLAN_VID_MASK) :
5381                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5382         }
5383
5384         if (fs->flow_type & FLOW_MAC_EXT) {
5385                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5386                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5387                         eth_zero_addr(fs->m_ext.h_dest);
5388                 else
5389                         ether_addr_copy(fs->m_ext.h_dest,
5390                                         rule->tuples_mask.dst_mac);
5391         }
5392
5393         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5394                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5395         } else {
5396                 u64 vf_id;
5397
5398                 fs->ring_cookie = rule->queue_id;
5399                 vf_id = rule->vf_id;
5400                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5401                 fs->ring_cookie |= vf_id;
5402         }
5403
5404         return 0;
5405 }
5406
5407 static int hclge_get_all_rules(struct hnae3_handle *handle,
5408                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5409 {
5410         struct hclge_vport *vport = hclge_get_vport(handle);
5411         struct hclge_dev *hdev = vport->back;
5412         struct hclge_fd_rule *rule;
5413         struct hlist_node *node2;
5414         int cnt = 0;
5415
5416         if (!hnae3_dev_fd_supported(hdev))
5417                 return -EOPNOTSUPP;
5418
5419         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5420
5421         hlist_for_each_entry_safe(rule, node2,
5422                                   &hdev->fd_rule_list, rule_node) {
5423                 if (cnt == cmd->rule_cnt)
5424                         return -EMSGSIZE;
5425
5426                 rule_locs[cnt] = rule->location;
5427                 cnt++;
5428         }
5429
5430         cmd->rule_cnt = cnt;
5431
5432         return 0;
5433 }
5434
5435 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5436 {
5437         struct hclge_vport *vport = hclge_get_vport(handle);
5438         struct hclge_dev *hdev = vport->back;
5439
5440         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5441                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5442 }
5443
5444 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5445 {
5446         struct hclge_vport *vport = hclge_get_vport(handle);
5447         struct hclge_dev *hdev = vport->back;
5448
5449         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5450 }
5451
5452 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5453 {
5454         struct hclge_vport *vport = hclge_get_vport(handle);
5455         struct hclge_dev *hdev = vport->back;
5456
5457         return hdev->rst_stats.hw_reset_done_cnt;
5458 }
5459
5460 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5461 {
5462         struct hclge_vport *vport = hclge_get_vport(handle);
5463         struct hclge_dev *hdev = vport->back;
5464
5465         hdev->fd_en = enable;
5466         if (!enable)
5467                 hclge_del_all_fd_entries(handle, false);
5468         else
5469                 hclge_restore_fd_entries(handle);
5470 }
5471
5472 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5473 {
5474         struct hclge_desc desc;
5475         struct hclge_config_mac_mode_cmd *req =
5476                 (struct hclge_config_mac_mode_cmd *)desc.data;
5477         u32 loop_en = 0;
5478         int ret;
5479
5480         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5481         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5482         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5483         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5484         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5485         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5486         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5487         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5488         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5489         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5490         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5491         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5492         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5493         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5494         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5495         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5496
5497         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5498         if (ret)
5499                 dev_err(&hdev->pdev->dev,
5500                         "mac enable fail, ret =%d.\n", ret);
5501 }
5502
5503 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5504 {
5505         struct hclge_config_mac_mode_cmd *req;
5506         struct hclge_desc desc;
5507         u32 loop_en;
5508         int ret;
5509
5510         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5511         /* 1 Read out the MAC mode config at first */
5512         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5513         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5514         if (ret) {
5515                 dev_err(&hdev->pdev->dev,
5516                         "mac loopback get fail, ret =%d.\n", ret);
5517                 return ret;
5518         }
5519
5520         /* 2 Then set up the loopback flag */
5521         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5522         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5523         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5524         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5525
5526         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5527
5528         /* 3 Config mac work mode with the loopback flag
5529          * and its original configuration parameters
5530          */
5531         hclge_cmd_reuse_desc(&desc, false);
5532         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5533         if (ret)
5534                 dev_err(&hdev->pdev->dev,
5535                         "mac loopback set fail, ret =%d.\n", ret);
5536         return ret;
5537 }
5538
5539 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5540                                      enum hnae3_loop loop_mode)
5541 {
5542 #define HCLGE_SERDES_RETRY_MS   10
5543 #define HCLGE_SERDES_RETRY_NUM  100
5544
5545 #define HCLGE_MAC_LINK_STATUS_MS   10
5546 #define HCLGE_MAC_LINK_STATUS_NUM  100
5547 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5548 #define HCLGE_MAC_LINK_STATUS_UP   1
5549
5550         struct hclge_serdes_lb_cmd *req;
5551         struct hclge_desc desc;
5552         int mac_link_ret = 0;
5553         int ret, i = 0;
5554         u8 loop_mode_b;
5555
5556         req = (struct hclge_serdes_lb_cmd *)desc.data;
5557         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5558
5559         switch (loop_mode) {
5560         case HNAE3_LOOP_SERIAL_SERDES:
5561                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5562                 break;
5563         case HNAE3_LOOP_PARALLEL_SERDES:
5564                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5565                 break;
5566         default:
5567                 dev_err(&hdev->pdev->dev,
5568                         "unsupported serdes loopback mode %d\n", loop_mode);
5569                 return -ENOTSUPP;
5570         }
5571
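             /* When enabling, set both the enable and mask bits for the
              * selected loopback mode; when disabling, set only the mask so
              * the enable bit is cleared. mac_link_ret records the MAC link
              * state expected once the change has taken effect.
              */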
5572         if (en) {
5573                 req->enable = loop_mode_b;
5574                 req->mask = loop_mode_b;
5575                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5576         } else {
5577                 req->mask = loop_mode_b;
5578                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5579         }
5580
5581         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5582         if (ret) {
5583                 dev_err(&hdev->pdev->dev,
5584                         "serdes loopback set fail, ret = %d\n", ret);
5585                 return ret;
5586         }
5587
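             /* Poll the command result until the firmware reports that the
              * serdes loopback configuration has completed, for at most
              * HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS milliseconds.
              */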
5588         do {
5589                 msleep(HCLGE_SERDES_RETRY_MS);
5590                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5591                                            true);
5592                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5593                 if (ret) {
5594                         dev_err(&hdev->pdev->dev,
5595                                 "serdes loopback get fail, ret = %d\n", ret);
5596                         return ret;
5597                 }
5598         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5599                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5600
5601         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5602                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5603                 return -EBUSY;
5604         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5605                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5606                 return -EIO;
5607         }
5608
5609         hclge_cfg_mac_mode(hdev, en);
5610
5611         i = 0;
5612         do {
5613                 /* serdes internal loopback is independent of the network cable. */
5614                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5615                 ret = hclge_get_mac_link_status(hdev);
5616                 if (ret == mac_link_ret)
5617                         return 0;
5618         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5619
5620         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5621
5622         return -EBUSY;
5623 }
5624
5625 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5626                             int stream_id, bool enable)
5627 {
5628         struct hclge_desc desc;
5629         struct hclge_cfg_com_tqp_queue_cmd *req =
5630                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5631         int ret;
5632
5633         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5634         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5635         req->stream_id = cpu_to_le16(stream_id);
5636         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5637
5638         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5639         if (ret)
5640                 dev_err(&hdev->pdev->dev,
5641                         "Tqp enable fail, status =%d.\n", ret);
5642         return ret;
5643 }
5644
5645 static int hclge_set_loopback(struct hnae3_handle *handle,
5646                               enum hnae3_loop loop_mode, bool en)
5647 {
5648         struct hclge_vport *vport = hclge_get_vport(handle);
5649         struct hnae3_knic_private_info *kinfo;
5650         struct hclge_dev *hdev = vport->back;
5651         int i, ret;
5652
5653         switch (loop_mode) {
5654         case HNAE3_LOOP_APP:
5655                 ret = hclge_set_app_loopback(hdev, en);
5656                 break;
5657         case HNAE3_LOOP_SERIAL_SERDES:
5658         case HNAE3_LOOP_PARALLEL_SERDES:
5659                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5660                 break;
5661         default:
5662                 ret = -ENOTSUPP;
5663                 dev_err(&hdev->pdev->dev,
5664                         "loop_mode %d is not supported\n", loop_mode);
5665                 break;
5666         }
5667
5668         if (ret)
5669                 return ret;
5670
5671         kinfo = &vport->nic.kinfo;
5672         for (i = 0; i < kinfo->num_tqps; i++) {
5673                 ret = hclge_tqp_enable(hdev, i, 0, en);
5674                 if (ret)
5675                         return ret;
5676         }
5677
5678         return 0;
5679 }
5680
5681 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5682 {
5683         struct hclge_vport *vport = hclge_get_vport(handle);
5684         struct hnae3_knic_private_info *kinfo;
5685         struct hnae3_queue *queue;
5686         struct hclge_tqp *tqp;
5687         int i;
5688
5689         kinfo = &vport->nic.kinfo;
5690         for (i = 0; i < kinfo->num_tqps; i++) {
5691                 queue = handle->kinfo.tqp[i];
5692                 tqp = container_of(queue, struct hclge_tqp, q);
5693                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5694         }
5695 }
5696
5697 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5698 {
5699         struct hclge_vport *vport = hclge_get_vport(handle);
5700         struct hclge_dev *hdev = vport->back;
5701
5702         if (enable) {
5703                 mod_timer(&hdev->service_timer, jiffies + HZ);
5704         } else {
5705                 del_timer_sync(&hdev->service_timer);
5706                 cancel_work_sync(&hdev->service_task);
5707                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5708         }
5709 }
5710
5711 static int hclge_ae_start(struct hnae3_handle *handle)
5712 {
5713         struct hclge_vport *vport = hclge_get_vport(handle);
5714         struct hclge_dev *hdev = vport->back;
5715
5716         /* mac enable */
5717         hclge_cfg_mac_mode(hdev, true);
5718         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5719         hdev->hw.mac.link = 0;
5720
5721         /* reset tqp stats */
5722         hclge_reset_tqp_stats(handle);
5723
5724         hclge_mac_start_phy(hdev);
5725
5726         return 0;
5727 }
5728
5729 static void hclge_ae_stop(struct hnae3_handle *handle)
5730 {
5731         struct hclge_vport *vport = hclge_get_vport(handle);
5732         struct hclge_dev *hdev = vport->back;
5733         int i;
5734
5735         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5736
5737         /* If it is not a PF reset, the firmware will disable the MAC,
5738          * so we only need to stop the PHY here.
5739          */
5740         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5741             hdev->reset_type != HNAE3_FUNC_RESET) {
5742                 hclge_mac_stop_phy(hdev);
5743                 return;
5744         }
5745
5746         for (i = 0; i < handle->kinfo.num_tqps; i++)
5747                 hclge_reset_tqp(handle, i);
5748
5749         /* Mac disable */
5750         hclge_cfg_mac_mode(hdev, false);
5751
5752         hclge_mac_stop_phy(hdev);
5753
5754         /* reset tqp stats */
5755         hclge_reset_tqp_stats(handle);
5756         hclge_update_link_status(hdev);
5757 }
5758
5759 int hclge_vport_start(struct hclge_vport *vport)
5760 {
5761         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5762         vport->last_active_jiffies = jiffies;
5763         return 0;
5764 }
5765
5766 void hclge_vport_stop(struct hclge_vport *vport)
5767 {
5768         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5769 }
5770
5771 static int hclge_client_start(struct hnae3_handle *handle)
5772 {
5773         struct hclge_vport *vport = hclge_get_vport(handle);
5774
5775         return hclge_vport_start(vport);
5776 }
5777
5778 static void hclge_client_stop(struct hnae3_handle *handle)
5779 {
5780         struct hclge_vport *vport = hclge_get_vport(handle);
5781
5782         hclge_vport_stop(vport);
5783 }
5784
5785 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5786                                          u16 cmdq_resp, u8  resp_code,
5787                                          enum hclge_mac_vlan_tbl_opcode op)
5788 {
5789         struct hclge_dev *hdev = vport->back;
5790         int return_status = -EIO;
5791
5792         if (cmdq_resp) {
5793                 dev_err(&hdev->pdev->dev,
5794                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5795                         cmdq_resp);
5796                 return -EIO;
5797         }
5798
5799         if (op == HCLGE_MAC_VLAN_ADD) {
5800                 if (!resp_code || resp_code == 1) {
5801                         return_status = 0;
5802                 } else if (resp_code == 2) {
5803                         return_status = -ENOSPC;
5804                         dev_err(&hdev->pdev->dev,
5805                                 "add mac addr failed for uc_overflow.\n");
5806                 } else if (resp_code == 3) {
5807                         return_status = -ENOSPC;
5808                         dev_err(&hdev->pdev->dev,
5809                                 "add mac addr failed for mc_overflow.\n");
5810                 } else {
5811                         dev_err(&hdev->pdev->dev,
5812                                 "add mac addr failed for undefined, code=%d.\n",
5813                                 resp_code);
5814                 }
5815         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5816                 if (!resp_code) {
5817                         return_status = 0;
5818                 } else if (resp_code == 1) {
5819                         return_status = -ENOENT;
5820                         dev_dbg(&hdev->pdev->dev,
5821                                 "remove mac addr failed for miss.\n");
5822                 } else {
5823                         dev_err(&hdev->pdev->dev,
5824                                 "remove mac addr failed for undefined, code=%d.\n",
5825                                 resp_code);
5826                 }
5827         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5828                 if (!resp_code) {
5829                         return_status = 0;
5830                 } else if (resp_code == 1) {
5831                         return_status = -ENOENT;
5832                         dev_dbg(&hdev->pdev->dev,
5833                                 "lookup mac addr failed for miss.\n");
5834                 } else {
5835                         dev_err(&hdev->pdev->dev,
5836                                 "lookup mac addr failed for undefined, code=%d.\n",
5837                                 resp_code);
5838                 }
5839         } else {
5840                 return_status = -EINVAL;
5841                 dev_err(&hdev->pdev->dev,
5842                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5843                         op);
5844         }
5845
5846         return return_status;
5847 }
5848
5849 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5850 {
5851         int word_num;
5852         int bit_num;
5853
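             /* The function id bitmap spans two descriptors: desc[1] covers
              * vfids 0-191 and desc[2] covers vfids 192-255, with 32 ids per
              * 32-bit data word.
              */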
5854         if (vfid > 255 || vfid < 0)
5855                 return -EIO;
5856
5857         if (vfid <= 191) {
5858                 word_num = vfid / 32;
5859                 bit_num  = vfid % 32;
5860                 if (clr)
5861                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5862                 else
5863                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5864         } else {
5865                 word_num = (vfid - 192) / 32;
5866                 bit_num  = vfid % 32;
5867                 if (clr)
5868                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5869                 else
5870                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5871         }
5872
5873         return 0;
5874 }
5875
5876 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5877 {
5878 #define HCLGE_DESC_NUMBER 3
5879 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5880         int i, j;
5881
5882         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5883                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5884                         if (desc[i].data[j])
5885                                 return false;
5886
5887         return true;
5888 }
5889
5890 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5891                                    const u8 *addr, bool is_mc)
5892 {
5893         const unsigned char *mac_addr = addr;
5894         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5895                        (mac_addr[0]) | (mac_addr[1] << 8);
5896         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5897
5898         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5899         if (is_mc) {
5900                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5901                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5902         }
5903
5904         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5905         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5906 }
5907
5908 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5909                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5910 {
5911         struct hclge_dev *hdev = vport->back;
5912         struct hclge_desc desc;
5913         u8 resp_code;
5914         u16 retval;
5915         int ret;
5916
5917         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5918
5919         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5920
5921         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5922         if (ret) {
5923                 dev_err(&hdev->pdev->dev,
5924                         "del mac addr failed for cmd_send, ret =%d.\n",
5925                         ret);
5926                 return ret;
5927         }
5928         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5929         retval = le16_to_cpu(desc.retval);
5930
5931         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5932                                              HCLGE_MAC_VLAN_REMOVE);
5933 }
5934
5935 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5936                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5937                                      struct hclge_desc *desc,
5938                                      bool is_mc)
5939 {
5940         struct hclge_dev *hdev = vport->back;
5941         u8 resp_code;
5942         u16 retval;
5943         int ret;
5944
5945         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5946         if (is_mc) {
5947                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5948                 memcpy(desc[0].data,
5949                        req,
5950                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5951                 hclge_cmd_setup_basic_desc(&desc[1],
5952                                            HCLGE_OPC_MAC_VLAN_ADD,
5953                                            true);
5954                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5955                 hclge_cmd_setup_basic_desc(&desc[2],
5956                                            HCLGE_OPC_MAC_VLAN_ADD,
5957                                            true);
5958                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5959         } else {
5960                 memcpy(desc[0].data,
5961                        req,
5962                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5963                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5964         }
5965         if (ret) {
5966                 dev_err(&hdev->pdev->dev,
5967                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5968                         ret);
5969                 return ret;
5970         }
5971         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5972         retval = le16_to_cpu(desc[0].retval);
5973
5974         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5975                                              HCLGE_MAC_VLAN_LKUP);
5976 }
5977
5978 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5979                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5980                                   struct hclge_desc *mc_desc)
5981 {
5982         struct hclge_dev *hdev = vport->back;
5983         int cfg_status;
5984         u8 resp_code;
5985         u16 retval;
5986         int ret;
5987
5988         if (!mc_desc) {
5989                 struct hclge_desc desc;
5990
5991                 hclge_cmd_setup_basic_desc(&desc,
5992                                            HCLGE_OPC_MAC_VLAN_ADD,
5993                                            false);
5994                 memcpy(desc.data, req,
5995                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5996                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5997                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5998                 retval = le16_to_cpu(desc.retval);
5999
6000                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6001                                                            resp_code,
6002                                                            HCLGE_MAC_VLAN_ADD);
6003         } else {
6004                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6005                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6006                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6007                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6008                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6009                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6010                 memcpy(mc_desc[0].data, req,
6011                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6012                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6013                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6014                 retval = le16_to_cpu(mc_desc[0].retval);
6015
6016                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6017                                                            resp_code,
6018                                                            HCLGE_MAC_VLAN_ADD);
6019         }
6020
6021         if (ret) {
6022                 dev_err(&hdev->pdev->dev,
6023                         "add mac addr failed for cmd_send, ret =%d.\n",
6024                         ret);
6025                 return ret;
6026         }
6027
6028         return cfg_status;
6029 }
6030
6031 static int hclge_init_umv_space(struct hclge_dev *hdev)
6032 {
6033         u16 allocated_size = 0;
6034         int ret;
6035
6036         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6037                                   true);
6038         if (ret)
6039                 return ret;
6040
6041         if (allocated_size < hdev->wanted_umv_size)
6042                 dev_warn(&hdev->pdev->dev,
6043                          "Alloc umv space failed, want %d, get %d\n",
6044                          hdev->wanted_umv_size, allocated_size);
6045
6046         mutex_init(&hdev->umv_mutex);
6047         hdev->max_umv_size = allocated_size;
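             /* Split the allocated UMV space into num_req_vfs + 2 equal
              * private quotas; the shared pool starts as one quota plus the
              * division remainder.
              */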
6048         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6049         hdev->share_umv_size = hdev->priv_umv_size +
6050                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6051
6052         return 0;
6053 }
6054
6055 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6056 {
6057         int ret;
6058
6059         if (hdev->max_umv_size > 0) {
6060                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6061                                           false);
6062                 if (ret)
6063                         return ret;
6064                 hdev->max_umv_size = 0;
6065         }
6066         mutex_destroy(&hdev->umv_mutex);
6067
6068         return 0;
6069 }
6070
6071 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6072                                u16 *allocated_size, bool is_alloc)
6073 {
6074         struct hclge_umv_spc_alc_cmd *req;
6075         struct hclge_desc desc;
6076         int ret;
6077
6078         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6079         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6080         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6081         req->space_size = cpu_to_le32(space_size);
6082
6083         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6084         if (ret) {
6085                 dev_err(&hdev->pdev->dev,
6086                         "%s umv space failed for cmd_send, ret =%d\n",
6087                         is_alloc ? "allocate" : "free", ret);
6088                 return ret;
6089         }
6090
6091         if (is_alloc && allocated_size)
6092                 *allocated_size = le32_to_cpu(desc.data[1]);
6093
6094         return 0;
6095 }
6096
6097 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6098 {
6099         struct hclge_vport *vport;
6100         int i;
6101
6102         for (i = 0; i < hdev->num_alloc_vport; i++) {
6103                 vport = &hdev->vport[i];
6104                 vport->used_umv_num = 0;
6105         }
6106
6107         mutex_lock(&hdev->umv_mutex);
6108         hdev->share_umv_size = hdev->priv_umv_size +
6109                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6110         mutex_unlock(&hdev->umv_mutex);
6111 }
6112
6113 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6114 {
6115         struct hclge_dev *hdev = vport->back;
6116         bool is_full;
6117
6118         mutex_lock(&hdev->umv_mutex);
6119         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6120                    hdev->share_umv_size == 0);
6121         mutex_unlock(&hdev->umv_mutex);
6122
6123         return is_full;
6124 }
6125
6126 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6127 {
6128         struct hclge_dev *hdev = vport->back;
6129
6130         mutex_lock(&hdev->umv_mutex);
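             /* On free: if this vport had spilled into the shared pool,
              * return one entry to it before decrementing the vport's count.
              * On allocation: once the private quota is used up, consume the
              * shared pool while entries remain.
              */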
6131         if (is_free) {
6132                 if (vport->used_umv_num > hdev->priv_umv_size)
6133                         hdev->share_umv_size++;
6134
6135                 if (vport->used_umv_num > 0)
6136                         vport->used_umv_num--;
6137         } else {
6138                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6139                     hdev->share_umv_size > 0)
6140                         hdev->share_umv_size--;
6141                 vport->used_umv_num++;
6142         }
6143         mutex_unlock(&hdev->umv_mutex);
6144 }
6145
6146 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6147                              const unsigned char *addr)
6148 {
6149         struct hclge_vport *vport = hclge_get_vport(handle);
6150
6151         return hclge_add_uc_addr_common(vport, addr);
6152 }
6153
6154 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6155                              const unsigned char *addr)
6156 {
6157         struct hclge_dev *hdev = vport->back;
6158         struct hclge_mac_vlan_tbl_entry_cmd req;
6159         struct hclge_desc desc;
6160         u16 egress_port = 0;
6161         int ret;
6162
6163         /* mac addr check */
6164         if (is_zero_ether_addr(addr) ||
6165             is_broadcast_ether_addr(addr) ||
6166             is_multicast_ether_addr(addr)) {
6167                 dev_err(&hdev->pdev->dev,
6168                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6169                          addr,
6170                          is_zero_ether_addr(addr),
6171                          is_broadcast_ether_addr(addr),
6172                          is_multicast_ether_addr(addr));
6173                 return -EINVAL;
6174         }
6175
6176         memset(&req, 0, sizeof(req));
6177
6178         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6179                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6180
6181         req.egress_port = cpu_to_le16(egress_port);
6182
6183         hclge_prepare_mac_addr(&req, addr, false);
6184
6185         /* Look up the mac address in the mac_vlan table, and add
6186          * it if the entry does not exist. Duplicate unicast entries
6187          * are not allowed in the mac_vlan table.
6188          */
6189         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6190         if (ret == -ENOENT) {
6191                 if (!hclge_is_umv_space_full(vport)) {
6192                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6193                         if (!ret)
6194                                 hclge_update_umv_space(vport, false);
6195                         return ret;
6196                 }
6197
6198                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6199                         hdev->priv_umv_size);
6200
6201                 return -ENOSPC;
6202         }
6203
6204         /* check if we just hit the duplicate */
6205         if (!ret) {
6206                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6207                          vport->vport_id, addr);
6208                 return 0;
6209         }
6210
6211         dev_err(&hdev->pdev->dev,
6212                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6213                 addr);
6214
6215         return ret;
6216 }
6217
6218 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6219                             const unsigned char *addr)
6220 {
6221         struct hclge_vport *vport = hclge_get_vport(handle);
6222
6223         return hclge_rm_uc_addr_common(vport, addr);
6224 }
6225
6226 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6227                             const unsigned char *addr)
6228 {
6229         struct hclge_dev *hdev = vport->back;
6230         struct hclge_mac_vlan_tbl_entry_cmd req;
6231         int ret;
6232
6233         /* mac addr check */
6234         if (is_zero_ether_addr(addr) ||
6235             is_broadcast_ether_addr(addr) ||
6236             is_multicast_ether_addr(addr)) {
6237                 dev_dbg(&hdev->pdev->dev,
6238                         "Remove mac err! invalid mac:%pM.\n",
6239                          addr);
6240                 return -EINVAL;
6241         }
6242
6243         memset(&req, 0, sizeof(req));
6244         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6245         hclge_prepare_mac_addr(&req, addr, false);
6246         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6247         if (!ret)
6248                 hclge_update_umv_space(vport, true);
6249
6250         return ret;
6251 }
6252
6253 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6254                              const unsigned char *addr)
6255 {
6256         struct hclge_vport *vport = hclge_get_vport(handle);
6257
6258         return hclge_add_mc_addr_common(vport, addr);
6259 }
6260
6261 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6262                              const unsigned char *addr)
6263 {
6264         struct hclge_dev *hdev = vport->back;
6265         struct hclge_mac_vlan_tbl_entry_cmd req;
6266         struct hclge_desc desc[3];
6267         int status;
6268
6269         /* mac addr check */
6270         if (!is_multicast_ether_addr(addr)) {
6271                 dev_err(&hdev->pdev->dev,
6272                         "Add mc mac err! invalid mac:%pM.\n",
6273                          addr);
6274                 return -EINVAL;
6275         }
6276         memset(&req, 0, sizeof(req));
6277         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6278         hclge_prepare_mac_addr(&req, addr, true);
6279         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6280         if (!status) {
6281                 /* This mac addr exists, update the VFID for it */
6282                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6283                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6284         } else {
6285                 /* This mac addr does not exist, add a new entry for it */
6286                 memset(desc[0].data, 0, sizeof(desc[0].data));
6287                 memset(desc[1].data, 0, sizeof(desc[0].data));
6288                 memset(desc[2].data, 0, sizeof(desc[0].data));
6289                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6290                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6291         }
6292
6293         if (status == -ENOSPC)
6294                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6295
6296         return status;
6297 }
6298
6299 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6300                             const unsigned char *addr)
6301 {
6302         struct hclge_vport *vport = hclge_get_vport(handle);
6303
6304         return hclge_rm_mc_addr_common(vport, addr);
6305 }
6306
6307 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6308                             const unsigned char *addr)
6309 {
6310         struct hclge_dev *hdev = vport->back;
6311         struct hclge_mac_vlan_tbl_entry_cmd req;
6312         enum hclge_cmd_status status;
6313         struct hclge_desc desc[3];
6314
6315         /* mac addr check */
6316         if (!is_multicast_ether_addr(addr)) {
6317                 dev_dbg(&hdev->pdev->dev,
6318                         "Remove mc mac err! invalid mac:%pM.\n",
6319                          addr);
6320                 return -EINVAL;
6321         }
6322
6323         memset(&req, 0, sizeof(req));
6324         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6325         hclge_prepare_mac_addr(&req, addr, true);
6326         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6327         if (!status) {
6328                 /* This mac addr exists, remove this handle's VFID from it */
6329                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6330
6331                 if (hclge_is_all_function_id_zero(desc))
6332                         /* All the vfids are zero, so delete this entry */
6333                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6334                 else
6335                         /* Not all the vfids are zero, just update the vfid bitmap */
6336                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6337
6338         } else {
6339                 /* This mac address may be in the mta table, but it cannot
6340                  * be deleted here because an mta entry represents an address
6341                  * range rather than a specific address. The delete action on
6342                  * all entries will take effect in update_mta_status, called by
6343                  * hns3_nic_set_rx_mode.
6344                  */
6345                 status = 0;
6346         }
6347
6348         return status;
6349 }
6350
6351 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6352                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6353 {
6354         struct hclge_vport_mac_addr_cfg *mac_cfg;
6355         struct list_head *list;
6356
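             /* Only VF vports keep a per-vport list of configured MAC
              * addresses; the PF (vport 0) is skipped here.
              */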
6357         if (!vport->vport_id)
6358                 return;
6359
6360         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6361         if (!mac_cfg)
6362                 return;
6363
6364         mac_cfg->hd_tbl_status = true;
6365         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6366
6367         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6368                &vport->uc_mac_list : &vport->mc_mac_list;
6369
6370         list_add_tail(&mac_cfg->node, list);
6371 }
6372
6373 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6374                               bool is_write_tbl,
6375                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6376 {
6377         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6378         struct list_head *list;
6379         bool uc_flag, mc_flag;
6380
6381         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6382                &vport->uc_mac_list : &vport->mc_mac_list;
6383
6384         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6385         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6386
6387         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6388                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6389                         if (uc_flag && mac_cfg->hd_tbl_status)
6390                                 hclge_rm_uc_addr_common(vport, mac_addr);
6391
6392                         if (mc_flag && mac_cfg->hd_tbl_status)
6393                                 hclge_rm_mc_addr_common(vport, mac_addr);
6394
6395                         list_del(&mac_cfg->node);
6396                         kfree(mac_cfg);
6397                         break;
6398                 }
6399         }
6400 }
6401
6402 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6403                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6404 {
6405         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6406         struct list_head *list;
6407
6408         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6409                &vport->uc_mac_list : &vport->mc_mac_list;
6410
6411         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6412                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6413                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6414
6415                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6416                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6417
6418                 mac_cfg->hd_tbl_status = false;
6419                 if (is_del_list) {
6420                         list_del(&mac_cfg->node);
6421                         kfree(mac_cfg);
6422                 }
6423         }
6424 }
6425
6426 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6427 {
6428         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6429         struct hclge_vport *vport;
6430         int i;
6431
6432         mutex_lock(&hdev->vport_cfg_mutex);
6433         for (i = 0; i < hdev->num_alloc_vport; i++) {
6434                 vport = &hdev->vport[i];
6435                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6436                         list_del(&mac->node);
6437                         kfree(mac);
6438                 }
6439
6440                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6441                         list_del(&mac->node);
6442                         kfree(mac);
6443                 }
6444         }
6445         mutex_unlock(&hdev->vport_cfg_mutex);
6446 }
6447
6448 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6449                                               u16 cmdq_resp, u8 resp_code)
6450 {
6451 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6452 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6453 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6454 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6455
6456         int return_status;
6457
6458         if (cmdq_resp) {
6459                 dev_err(&hdev->pdev->dev,
6460                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6461                         cmdq_resp);
6462                 return -EIO;
6463         }
6464
6465         switch (resp_code) {
6466         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6467         case HCLGE_ETHERTYPE_ALREADY_ADD:
6468                 return_status = 0;
6469                 break;
6470         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6471                 dev_err(&hdev->pdev->dev,
6472                         "add mac ethertype failed for manager table overflow.\n");
6473                 return_status = -EIO;
6474                 break;
6475         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6476                 dev_err(&hdev->pdev->dev,
6477                         "add mac ethertype failed for key conflict.\n");
6478                 return_status = -EIO;
6479                 break;
6480         default:
6481                 dev_err(&hdev->pdev->dev,
6482                         "add mac ethertype failed for undefined, code=%d.\n",
6483                         resp_code);
6484                 return_status = -EIO;
6485         }
6486
6487         return return_status;
6488 }
6489
6490 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6491                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6492 {
6493         struct hclge_desc desc;
6494         u8 resp_code;
6495         u16 retval;
6496         int ret;
6497
6498         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6499         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6500
6501         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6502         if (ret) {
6503                 dev_err(&hdev->pdev->dev,
6504                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6505                         ret);
6506                 return ret;
6507         }
6508
6509         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6510         retval = le16_to_cpu(desc.retval);
6511
6512         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6513 }
6514
6515 static int init_mgr_tbl(struct hclge_dev *hdev)
6516 {
6517         int ret;
6518         int i;
6519
6520         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6521                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6522                 if (ret) {
6523                         dev_err(&hdev->pdev->dev,
6524                                 "add mac ethertype failed, ret =%d.\n",
6525                                 ret);
6526                         return ret;
6527                 }
6528         }
6529
6530         return 0;
6531 }
6532
6533 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6534 {
6535         struct hclge_vport *vport = hclge_get_vport(handle);
6536         struct hclge_dev *hdev = vport->back;
6537
6538         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6539 }
6540
6541 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6542                               bool is_first)
6543 {
6544         const unsigned char *new_addr = (const unsigned char *)p;
6545         struct hclge_vport *vport = hclge_get_vport(handle);
6546         struct hclge_dev *hdev = vport->back;
6547         int ret;
6548
6549         /* mac addr check */
6550         if (is_zero_ether_addr(new_addr) ||
6551             is_broadcast_ether_addr(new_addr) ||
6552             is_multicast_ether_addr(new_addr)) {
6553                 dev_err(&hdev->pdev->dev,
6554                         "Change uc mac err! invalid mac:%p.\n",
6555                          new_addr);
6556                 return -EINVAL;
6557         }
6558
6559         if ((!is_first || is_kdump_kernel()) &&
6560             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6561                 dev_warn(&hdev->pdev->dev,
6562                          "remove old uc mac address fail.\n");
6563
6564         ret = hclge_add_uc_addr(handle, new_addr);
6565         if (ret) {
6566                 dev_err(&hdev->pdev->dev,
6567                         "add uc mac address fail, ret =%d.\n",
6568                         ret);
6569
6570                 if (!is_first &&
6571                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6572                         dev_err(&hdev->pdev->dev,
6573                                 "restore uc mac address fail.\n");
6574
6575                 return -EIO;
6576         }
6577
6578         ret = hclge_pause_addr_cfg(hdev, new_addr);
6579         if (ret) {
6580                 dev_err(&hdev->pdev->dev,
6581                         "configure mac pause address fail, ret =%d.\n",
6582                         ret);
6583                 return -EIO;
6584         }
6585
6586         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6587
6588         return 0;
6589 }
6590
6591 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6592                           int cmd)
6593 {
6594         struct hclge_vport *vport = hclge_get_vport(handle);
6595         struct hclge_dev *hdev = vport->back;
6596
6597         if (!hdev->hw.mac.phydev)
6598                 return -EOPNOTSUPP;
6599
6600         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6601 }
6602
6603 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6604                                       u8 fe_type, bool filter_en, u8 vf_id)
6605 {
6606         struct hclge_vlan_filter_ctrl_cmd *req;
6607         struct hclge_desc desc;
6608         int ret;
6609
6610         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6611
6612         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6613         req->vlan_type = vlan_type;
6614         req->vlan_fe = filter_en ? fe_type : 0;
6615         req->vf_id = vf_id;
6616
6617         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6618         if (ret)
6619                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6620                         ret);
6621
6622         return ret;
6623 }
6624
6625 #define HCLGE_FILTER_TYPE_VF            0
6626 #define HCLGE_FILTER_TYPE_PORT          1
6627 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6628 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6629 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6630 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6631 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6632 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6633                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6634 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6635                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6636
6637 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6638 {
6639         struct hclge_vport *vport = hclge_get_vport(handle);
6640         struct hclge_dev *hdev = vport->back;
6641
6642         if (hdev->pdev->revision >= 0x21) {
6643                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6644                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
6645                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6646                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
6647         } else {
6648                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6649                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6650                                            0);
6651         }
6652         if (enable)
6653                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6654         else
6655                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6656 }
6657
6658 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6659                                     bool is_kill, u16 vlan, u8 qos,
6660                                     __be16 proto)
6661 {
6662 #define HCLGE_MAX_VF_BYTES  16
6663         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6664         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6665         struct hclge_desc desc[2];
6666         u8 vf_byte_val;
6667         u8 vf_byte_off;
6668         int ret;
6669
6670         hclge_cmd_setup_basic_desc(&desc[0],
6671                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6672         hclge_cmd_setup_basic_desc(&desc[1],
6673                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6674
6675         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6676
6677         vf_byte_off = vfid / 8;
6678         vf_byte_val = 1 << (vfid % 8);
6679
6680         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6681         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6682
6683         req0->vlan_id  = cpu_to_le16(vlan);
6684         req0->vlan_cfg = is_kill;
6685
6686         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6687                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6688         else
6689                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6690
6691         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6692         if (ret) {
6693                 dev_err(&hdev->pdev->dev,
6694                         "Send vf vlan command fail, ret =%d.\n",
6695                         ret);
6696                 return ret;
6697         }
6698
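             /* Interpret the firmware response: for an add, resp_code 0 or 1
              * is treated as success and HCLGE_VF_VLAN_NO_ENTRY means the VF
              * VLAN table has no free entry; for a kill, 0 is success and
              * HCLGE_VF_VLAN_DEL_NO_FOUND means the filter was not in the
              * table.
              */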
6699         if (!is_kill) {
6700 #define HCLGE_VF_VLAN_NO_ENTRY  2
6701                 if (!req0->resp_code || req0->resp_code == 1)
6702                         return 0;
6703
6704                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6705                         dev_warn(&hdev->pdev->dev,
6706                                  "vf vlan table is full, vf vlan filter is disabled\n");
6707                         return 0;
6708                 }
6709
6710                 dev_err(&hdev->pdev->dev,
6711                         "Add vf vlan filter fail, ret =%d.\n",
6712                         req0->resp_code);
6713         } else {
6714 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6715                 if (!req0->resp_code)
6716                         return 0;
6717
6718                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6719                         dev_warn(&hdev->pdev->dev,
6720                                  "vlan %d filter is not in vf vlan table\n",
6721                                  vlan);
6722                         return 0;
6723                 }
6724
6725                 dev_err(&hdev->pdev->dev,
6726                         "Kill vf vlan filter fail, ret =%d.\n",
6727                         req0->resp_code);
6728         }
6729
6730         return -EIO;
6731 }
6732
6733 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6734                                       u16 vlan_id, bool is_kill)
6735 {
6736         struct hclge_vlan_filter_pf_cfg_cmd *req;
6737         struct hclge_desc desc;
6738         u8 vlan_offset_byte_val;
6739         u8 vlan_offset_byte;
6740         u8 vlan_offset_160;
6741         int ret;
6742
6743         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6744
6745         vlan_offset_160 = vlan_id / 160;
6746         vlan_offset_byte = (vlan_id % 160) / 8;
6747         vlan_offset_byte_val = 1 << (vlan_id % 8);
6748
6749         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6750         req->vlan_offset = vlan_offset_160;
6751         req->vlan_cfg = is_kill;
6752         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6753
6754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6755         if (ret)
6756                 dev_err(&hdev->pdev->dev,
6757                         "port vlan command, send fail, ret =%d.\n", ret);
6758         return ret;
6759 }
6760
6761 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6762                                     u16 vport_id, u16 vlan_id, u8 qos,
6763                                     bool is_kill)
6764 {
6765         u16 vport_idx, vport_num = 0;
6766         int ret;
6767
6768         if (is_kill && !vlan_id)
6769                 return 0;
6770
6771         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6772                                        0, proto);
6773         if (ret) {
6774                 dev_err(&hdev->pdev->dev,
6775                         "Set %d vport vlan filter config fail, ret =%d.\n",
6776                         vport_id, ret);
6777                 return ret;
6778         }
6779
6780         /* vlan 0 may be added twice when 8021q module is enabled */
6781         if (!is_kill && !vlan_id &&
6782             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6783                 return 0;
6784
6785         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6786                 dev_err(&hdev->pdev->dev,
6787                         "Add port vlan failed, vport %d is already in vlan %d\n",
6788                         vport_id, vlan_id);
6789                 return -EINVAL;
6790         }
6791
6792         if (is_kill &&
6793             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6794                 dev_err(&hdev->pdev->dev,
6795                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6796                         vport_id, vlan_id);
6797                 return -EINVAL;
6798         }
6799
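             /* The port-level VLAN filter entry is only programmed when the
              * first vport joins the VLAN or the last vport leaves it.
              */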
6800         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6801                 vport_num++;
6802
6803         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6804                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6805                                                  is_kill);
6806
6807         return ret;
6808 }
6809
6810 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6811 {
6812         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6813         struct hclge_vport_vtag_tx_cfg_cmd *req;
6814         struct hclge_dev *hdev = vport->back;
6815         struct hclge_desc desc;
6816         int status;
6817
6818         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6819
6820         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6821         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6822         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6823         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6824                       vcfg->accept_tag1 ? 1 : 0);
6825         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6826                       vcfg->accept_untag1 ? 1 : 0);
6827         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6828                       vcfg->accept_tag2 ? 1 : 0);
6829         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6830                       vcfg->accept_untag2 ? 1 : 0);
6831         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6832                       vcfg->insert_tag1_en ? 1 : 0);
6833         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6834                       vcfg->insert_tag2_en ? 1 : 0);
6835         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6836
6837         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6838         req->vf_bitmap[req->vf_offset] =
6839                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6840
6841         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6842         if (status)
6843                 dev_err(&hdev->pdev->dev,
6844                         "Send port txvlan cfg command fail, ret =%d\n",
6845                         status);
6846
6847         return status;
6848 }
6849
6850 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6851 {
6852         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6853         struct hclge_vport_vtag_rx_cfg_cmd *req;
6854         struct hclge_dev *hdev = vport->back;
6855         struct hclge_desc desc;
6856         int status;
6857
6858         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6859
6860         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6861         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6862                       vcfg->strip_tag1_en ? 1 : 0);
6863         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6864                       vcfg->strip_tag2_en ? 1 : 0);
6865         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6866                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6867         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6868                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6869
6870         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6871         req->vf_bitmap[req->vf_offset] =
6872                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6873
6874         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6875         if (status)
6876                 dev_err(&hdev->pdev->dev,
6877                         "Send port rxvlan cfg command fail, ret =%d\n",
6878                         status);
6879
6880         return status;
6881 }
6882
6883 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6884                                   u16 port_base_vlan_state,
6885                                   u16 vlan_tag)
6886 {
6887         int ret;
6888
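             /* With a port-based VLAN active, the default tag1 (the port
              * VLAN) is inserted on transmit and tag1-tagged frames are no
              * longer accepted; otherwise tag1 frames pass through untouched.
              */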
6889         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6890                 vport->txvlan_cfg.accept_tag1 = true;
6891                 vport->txvlan_cfg.insert_tag1_en = false;
6892                 vport->txvlan_cfg.default_tag1 = 0;
6893         } else {
6894                 vport->txvlan_cfg.accept_tag1 = false;
6895                 vport->txvlan_cfg.insert_tag1_en = true;
6896                 vport->txvlan_cfg.default_tag1 = vlan_tag;
6897         }
6898
6899         vport->txvlan_cfg.accept_untag1 = true;
6900
6901         /* accept_tag2 and accept_untag2 are not supported on
6902          * pdev revision 0x20; newer revisions support them, but
6903          * these two fields cannot be configured by the user.
6904          */
6905         vport->txvlan_cfg.accept_tag2 = true;
6906         vport->txvlan_cfg.accept_untag2 = true;
6907         vport->txvlan_cfg.insert_tag2_en = false;
6908         vport->txvlan_cfg.default_tag2 = 0;
6909
6910         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6911                 vport->rxvlan_cfg.strip_tag1_en = false;
6912                 vport->rxvlan_cfg.strip_tag2_en =
6913                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6914         } else {
6915                 vport->rxvlan_cfg.strip_tag1_en =
6916                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6917                 vport->rxvlan_cfg.strip_tag2_en = true;
6918         }
6919         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6920         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6921
6922         ret = hclge_set_vlan_tx_offload_cfg(vport);
6923         if (ret)
6924                 return ret;
6925
6926         return hclge_set_vlan_rx_offload_cfg(vport);
6927 }
6928
6929 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6930 {
6931         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6932         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6933         struct hclge_desc desc;
6934         int status;
6935
6936         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6937         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6938         rx_req->ot_fst_vlan_type =
6939                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6940         rx_req->ot_sec_vlan_type =
6941                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6942         rx_req->in_fst_vlan_type =
6943                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6944         rx_req->in_sec_vlan_type =
6945                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6946
6947         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6948         if (status) {
6949                 dev_err(&hdev->pdev->dev,
6950                         "Send rxvlan protocol type command fail, ret =%d\n",
6951                         status);
6952                 return status;
6953         }
6954
6955         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6956
6957         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6958         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6959         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6960
6961         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6962         if (status)
6963                 dev_err(&hdev->pdev->dev,
6964                         "Send txvlan protocol type command fail, ret =%d\n",
6965                         status);
6966
6967         return status;
6968 }
6969
6970 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6971 {
6972 #define HCLGE_DEF_VLAN_TYPE             0x8100
6973
6974         struct hnae3_handle *handle = &hdev->vport[0].nic;
6975         struct hclge_vport *vport;
6976         int ret;
6977         int i;
6978
6979         if (hdev->pdev->revision >= 0x21) {
6980                 /* for revision 0x21, vf vlan filter is per function */
6981                 for (i = 0; i < hdev->num_alloc_vport; i++) {
6982                         vport = &hdev->vport[i];
6983                         ret = hclge_set_vlan_filter_ctrl(hdev,
6984                                                          HCLGE_FILTER_TYPE_VF,
6985                                                          HCLGE_FILTER_FE_EGRESS,
6986                                                          true,
6987                                                          vport->vport_id);
6988                         if (ret)
6989                                 return ret;
6990                 }
6991
6992                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6993                                                  HCLGE_FILTER_FE_INGRESS, true,
6994                                                  0);
6995                 if (ret)
6996                         return ret;
6997         } else {
6998                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6999                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7000                                                  true, 0);
7001                 if (ret)
7002                         return ret;
7003         }
7004
7005         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7006
7007         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7008         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7009         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7010         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7011         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7012         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7013
7014         ret = hclge_set_vlan_protocol_type(hdev);
7015         if (ret)
7016                 return ret;
7017
7018         for (i = 0; i < hdev->num_alloc_vport; i++) {
7019                 u16 vlan_tag;
7020
7021                 vport = &hdev->vport[i];
7022                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7023
7024                 ret = hclge_vlan_offload_cfg(vport,
7025                                              vport->port_base_vlan_cfg.state,
7026                                              vlan_tag);
7027                 if (ret)
7028                         return ret;
7029         }
7030
7031         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7032 }
7033
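/* The vport VLAN list mirrors the VLANs configured for a vport so that they
 * can later be restored to, or removed from, the hardware filter table (for
 * example when port based VLAN is toggled). hd_tbl_status records whether an
 * entry is currently written to the hardware table.
 */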
7034 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7035                                        bool writen_to_tbl)
7036 {
7037         struct hclge_vport_vlan_cfg *vlan;
7038
7039         /* vlan 0 is reserved */
7040         if (!vlan_id)
7041                 return;
7042
7043         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7044         if (!vlan)
7045                 return;
7046
7047         vlan->hd_tbl_status = writen_to_tbl;
7048         vlan->vlan_id = vlan_id;
7049
7050         list_add_tail(&vlan->node, &vport->vlan_list);
7051 }
7052
7053 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7054 {
7055         struct hclge_vport_vlan_cfg *vlan, *tmp;
7056         struct hclge_dev *hdev = vport->back;
7057         int ret;
7058
7059         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7060                 if (!vlan->hd_tbl_status) {
7061                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7062                                                        vport->vport_id,
7063                                                        vlan->vlan_id, 0, false);
7064                         if (ret) {
7065                                 dev_err(&hdev->pdev->dev,
7066                                         "restore vport vlan list failed, ret=%d\n",
7067                                         ret);
7068                                 return ret;
7069                         }
7070                 }
7071                 vlan->hd_tbl_status = true;
7072         }
7073
7074         return 0;
7075 }
7076
7077 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7078                                       bool is_write_tbl)
7079 {
7080         struct hclge_vport_vlan_cfg *vlan, *tmp;
7081         struct hclge_dev *hdev = vport->back;
7082
7083         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7084                 if (vlan->vlan_id == vlan_id) {
7085                         if (is_write_tbl && vlan->hd_tbl_status)
7086                                 hclge_set_vlan_filter_hw(hdev,
7087                                                          htons(ETH_P_8021Q),
7088                                                          vport->vport_id,
7089                                                          vlan_id, 0,
7090                                                          true);
7091
7092                         list_del(&vlan->node);
7093                         kfree(vlan);
7094                         break;
7095                 }
7096         }
7097 }
7098
7099 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7100 {
7101         struct hclge_vport_vlan_cfg *vlan, *tmp;
7102         struct hclge_dev *hdev = vport->back;
7103
7104         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7105                 if (vlan->hd_tbl_status)
7106                         hclge_set_vlan_filter_hw(hdev,
7107                                                  htons(ETH_P_8021Q),
7108                                                  vport->vport_id,
7109                                                  vlan->vlan_id, 0,
7110                                                  true);
7111
7112                 vlan->hd_tbl_status = false;
7113                 if (is_del_list) {
7114                         list_del(&vlan->node);
7115                         kfree(vlan);
7116                 }
7117         }
7118 }
7119
7120 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7121 {
7122         struct hclge_vport_vlan_cfg *vlan, *tmp;
7123         struct hclge_vport *vport;
7124         int i;
7125
7126         mutex_lock(&hdev->vport_cfg_mutex);
7127         for (i = 0; i < hdev->num_alloc_vport; i++) {
7128                 vport = &hdev->vport[i];
7129                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7130                         list_del(&vlan->node);
7131                         kfree(vlan);
7132                 }
7133         }
7134         mutex_unlock(&hdev->vport_cfg_mutex);
7135 }
7136
7137 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7138 {
7139         struct hclge_vport *vport = hclge_get_vport(handle);
7140
7141         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7142                 vport->rxvlan_cfg.strip_tag1_en = false;
7143                 vport->rxvlan_cfg.strip_tag2_en = enable;
7144         } else {
7145                 vport->rxvlan_cfg.strip_tag1_en = enable;
7146                 vport->rxvlan_cfg.strip_tag2_en = true;
7147         }
7148         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7149         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7150         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7151
7152         return hclge_set_vlan_rx_offload_cfg(vport);
7153 }
7154
7155 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7156                                             u16 port_base_vlan_state,
7157                                             struct hclge_vlan_info *new_info,
7158                                             struct hclge_vlan_info *old_info)
7159 {
7160         struct hclge_dev *hdev = vport->back;
7161         int ret;
7162
7163         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7164                 hclge_rm_vport_all_vlan_table(vport, false);
7165                 return hclge_set_vlan_filter_hw(hdev,
7166                                                  htons(new_info->vlan_proto),
7167                                                  vport->vport_id,
7168                                                  new_info->vlan_tag,
7169                                                  new_info->qos, false);
7170         }
7171
7172         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7173                                        vport->vport_id, old_info->vlan_tag,
7174                                        old_info->qos, true);
7175         if (ret)
7176                 return ret;
7177
7178         return hclge_add_vport_all_vlan_table(vport);
7179 }
7180
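/* Apply a new port based VLAN configuration to a vport: reconfigure the VLAN
 * tag offload, update the hardware filter entries and record the new VLAN
 * info. The state itself is only updated on an enable/disable transition;
 * a MODIFY just swaps the old filter entry for the new one.
 */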
7181 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7182                                     struct hclge_vlan_info *vlan_info)
7183 {
7184         struct hnae3_handle *nic = &vport->nic;
7185         struct hclge_vlan_info *old_vlan_info;
7186         struct hclge_dev *hdev = vport->back;
7187         int ret;
7188
7189         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7190
7191         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7192         if (ret)
7193                 return ret;
7194
7195         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7196                 /* add new VLAN tag */
7197                 ret = hclge_set_vlan_filter_hw(hdev,
7198                                                htons(vlan_info->vlan_proto),
7199                                                vport->vport_id,
7200                                                vlan_info->vlan_tag,
7201                                                vlan_info->qos, false);
7202                 if (ret)
7203                         return ret;
7204
7205                 /* remove old VLAN tag */
7206                 ret = hclge_set_vlan_filter_hw(hdev,
7207                                                htons(old_vlan_info->vlan_proto),
7208                                                vport->vport_id,
7209                                                old_vlan_info->vlan_tag,
7210                                                old_vlan_info->qos, true);
7211                 if (ret)
7212                         return ret;
7213
7214                 goto update;
7215         }
7216
7217         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7218                                                old_vlan_info);
7219         if (ret)
7220                 return ret;
7221
7222         /* update state only when disabling/enabling port based VLAN */
7223         vport->port_base_vlan_cfg.state = state;
7224         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7225                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7226         else
7227                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7228
7229 update:
7230         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7231         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7232         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7233
7234         return 0;
7235 }
7236
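/* Map a requested VLAN onto a port based VLAN state transition:
 *   disabled + vlan 0        -> NOCHANGE
 *   disabled + non-zero vlan -> ENABLE
 *   enabled  + vlan 0        -> DISABLE
 *   enabled  + same vlan     -> NOCHANGE
 *   enabled  + other vlan    -> MODIFY
 */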
7237 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7238                                           enum hnae3_port_base_vlan_state state,
7239                                           u16 vlan)
7240 {
7241         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7242                 if (!vlan)
7243                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7244                 else
7245                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7246         } else {
7247                 if (!vlan)
7248                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7249                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7250                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7251                 else
7252                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7253         }
7254 }
7255
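/* Handle a set-VF-VLAN request (typically reached from ndo_set_vf_vlan):
 * translate it into a port based VLAN state change for the target vport. The
 * PF (vfid 0) is reconfigured directly with its client paused; an alive VF is
 * notified through the mailbox so it can apply the change itself, otherwise
 * the configuration is updated on its behalf.
 */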
7256 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7257                                     u16 vlan, u8 qos, __be16 proto)
7258 {
7259         struct hclge_vport *vport = hclge_get_vport(handle);
7260         struct hclge_dev *hdev = vport->back;
7261         struct hclge_vlan_info vlan_info;
7262         u16 state;
7263         int ret;
7264
7265         if (hdev->pdev->revision == 0x20)
7266                 return -EOPNOTSUPP;
7267
7268         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7269         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7270                 return -EINVAL;
7271         if (proto != htons(ETH_P_8021Q))
7272                 return -EPROTONOSUPPORT;
7273
7274         vport = &hdev->vport[vfid];
7275         state = hclge_get_port_base_vlan_state(vport,
7276                                                vport->port_base_vlan_cfg.state,
7277                                                vlan);
7278         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7279                 return 0;
7280
7281         vlan_info.vlan_tag = vlan;
7282         vlan_info.qos = qos;
7283         vlan_info.vlan_proto = ntohs(proto);
7284
7285         /* update port based VLAN for PF */
7286         if (!vfid) {
7287                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7288                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7289                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7290
7291                 return ret;
7292         }
7293
7294         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7295                 return hclge_update_port_base_vlan_cfg(vport, state,
7296                                                        &vlan_info);
7297         } else {
7298                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7299                                                         (u8)vfid, state,
7300                                                         vlan, qos,
7301                                                         ntohs(proto));
7302                 return ret;
7303         }
7304 }
7305
7306 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7307                           u16 vlan_id, bool is_kill)
7308 {
7309         struct hclge_vport *vport = hclge_get_vport(handle);
7310         struct hclge_dev *hdev = vport->back;
7311         bool writen_to_tbl = false;
7312         int ret = 0;
7313
7314         /* when port based VLAN is enabled, we use the port based VLAN as the
7315          * VLAN filter entry. In this case, we don't update the VLAN filter table
7316          * when the user adds a new VLAN or removes an existing VLAN, we just
7317          * update the vport VLAN list. The VLAN ids in the VLAN list won't be
7318          * written to the VLAN filter table until port based VLAN is disabled.
7319          */
7320         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7321                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7322                                                vlan_id, 0, is_kill);
7323                 writen_to_tbl = true;
7324         }
7325
7326         if (ret)
7327                 return ret;
7328
7329         if (is_kill)
7330                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7331         else
7332                 hclge_add_vport_vlan_table(vport, vlan_id,
7333                                            writen_to_tbl);
7334
7335         return 0;
7336 }
7337
7338 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7339 {
7340         struct hclge_config_max_frm_size_cmd *req;
7341         struct hclge_desc desc;
7342
7343         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7344
7345         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7346         req->max_frm_size = cpu_to_le16(new_mps);
7347         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7348
7349         return hclge_cmd_send(&hdev->hw, &desc, 1);
7350 }
7351
7352 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7353 {
7354         struct hclge_vport *vport = hclge_get_vport(handle);
7355
7356         return hclge_set_vport_mtu(vport, new_mtu);
7357 }
7358
7359 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7360 {
7361         struct hclge_dev *hdev = vport->back;
7362         int i, max_frm_size, ret = 0;
7363
7364         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
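        /* e.g. the default MTU of 1500 gives 1500 + 14 (ETH_HLEN) +
         * 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526 bytes of max frame size
         */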
7365         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7366             max_frm_size > HCLGE_MAC_MAX_FRAME)
7367                 return -EINVAL;
7368
7369         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7370         mutex_lock(&hdev->vport_lock);
7371         /* VF's mps must fit within hdev->mps */
7372         if (vport->vport_id && max_frm_size > hdev->mps) {
7373                 mutex_unlock(&hdev->vport_lock);
7374                 return -EINVAL;
7375         } else if (vport->vport_id) {
7376                 vport->mps = max_frm_size;
7377                 mutex_unlock(&hdev->vport_lock);
7378                 return 0;
7379         }
7380
7381         /* PF's mps must be greater than or equal to every VF's mps */
7382         for (i = 1; i < hdev->num_alloc_vport; i++)
7383                 if (max_frm_size < hdev->vport[i].mps) {
7384                         mutex_unlock(&hdev->vport_lock);
7385                         return -EINVAL;
7386                 }
7387
7388         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7389
7390         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7391         if (ret) {
7392                 dev_err(&hdev->pdev->dev,
7393                         "Change mtu fail, ret =%d\n", ret);
7394                 goto out;
7395         }
7396
7397         hdev->mps = max_frm_size;
7398         vport->mps = max_frm_size;
7399
7400         ret = hclge_buffer_alloc(hdev);
7401         if (ret)
7402                 dev_err(&hdev->pdev->dev,
7403                         "Allocate buffer fail, ret =%d\n", ret);
7404
7405 out:
7406         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7407         mutex_unlock(&hdev->vport_lock);
7408         return ret;
7409 }
7410
7411 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7412                                     bool enable)
7413 {
7414         struct hclge_reset_tqp_queue_cmd *req;
7415         struct hclge_desc desc;
7416         int ret;
7417
7418         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7419
7420         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7421         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7422         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7423
7424         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7425         if (ret) {
7426                 dev_err(&hdev->pdev->dev,
7427                         "Send tqp reset cmd error, status =%d\n", ret);
7428                 return ret;
7429         }
7430
7431         return 0;
7432 }
7433
7434 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7435 {
7436         struct hclge_reset_tqp_queue_cmd *req;
7437         struct hclge_desc desc;
7438         int ret;
7439
7440         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7441
7442         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7443         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7444
7445         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7446         if (ret) {
7447                 dev_err(&hdev->pdev->dev,
7448                         "Get reset status error, status =%d\n", ret);
7449                 return ret;
7450         }
7451
7452         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7453 }
7454
7455 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7456 {
7457         struct hnae3_queue *queue;
7458         struct hclge_tqp *tqp;
7459
7460         queue = handle->kinfo.tqp[queue_id];
7461         tqp = container_of(queue, struct hclge_tqp, q);
7462
7463         return tqp->index;
7464 }
7465
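/* Reset a single TQP: disable the queue, request the hardware reset, poll the
 * reset status every 20 ms for up to HCLGE_TQP_RESET_TRY_TIMES attempts, and
 * finally deassert the reset again.
 */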
7466 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7467 {
7468         struct hclge_vport *vport = hclge_get_vport(handle);
7469         struct hclge_dev *hdev = vport->back;
7470         int reset_try_times = 0;
7471         int reset_status;
7472         u16 queue_gid;
7473         int ret = 0;
7474
7475         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7476
7477         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7478         if (ret) {
7479                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7480                 return ret;
7481         }
7482
7483         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7484         if (ret) {
7485                 dev_err(&hdev->pdev->dev,
7486                         "Send reset tqp cmd fail, ret = %d\n", ret);
7487                 return ret;
7488         }
7489
7490         reset_try_times = 0;
7491         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7492                 /* Wait for tqp hw reset */
7493                 msleep(20);
7494                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7495                 if (reset_status)
7496                         break;
7497         }
7498
7499         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7500                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7501                 return ret;
7502         }
7503
7504         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7505         if (ret)
7506                 dev_err(&hdev->pdev->dev,
7507                         "Deassert the soft reset fail, ret = %d\n", ret);
7508
7509         return ret;
7510 }
7511
7512 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7513 {
7514         struct hclge_dev *hdev = vport->back;
7515         int reset_try_times = 0;
7516         int reset_status;
7517         u16 queue_gid;
7518         int ret;
7519
7520         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7521
7522         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7523         if (ret) {
7524                 dev_warn(&hdev->pdev->dev,
7525                          "Send reset tqp cmd fail, ret = %d\n", ret);
7526                 return;
7527         }
7528
7529         reset_try_times = 0;
7530         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7531                 /* Wait for tqp hw reset */
7532                 msleep(20);
7533                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7534                 if (reset_status)
7535                         break;
7536         }
7537
7538         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7539                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7540                 return;
7541         }
7542
7543         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7544         if (ret)
7545                 dev_warn(&hdev->pdev->dev,
7546                          "Deassert the soft reset fail, ret = %d\n", ret);
7547 }
7548
7549 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7550 {
7551         struct hclge_vport *vport = hclge_get_vport(handle);
7552         struct hclge_dev *hdev = vport->back;
7553
7554         return hdev->fw_version;
7555 }
7556
7557 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7558 {
7559         struct phy_device *phydev = hdev->hw.mac.phydev;
7560
7561         if (!phydev)
7562                 return;
7563
7564         phy_set_asym_pause(phydev, rx_en, tx_en);
7565 }
7566
7567 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7568 {
7569         int ret;
7570
7571         if (rx_en && tx_en)
7572                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7573         else if (rx_en && !tx_en)
7574                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7575         else if (!rx_en && tx_en)
7576                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7577         else
7578                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7579
7580         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7581                 return 0;
7582
7583         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7584         if (ret) {
7585                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7586                         ret);
7587                 return ret;
7588         }
7589
7590         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7591
7592         return 0;
7593 }
7594
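/* Resolve the pause configuration from the local and link partner
 * advertisements with mii_resolve_flowctrl_fdx() and apply it to the MAC.
 * Nothing is done without link or autoneg, and pause is forced off when the
 * link resolves to half duplex.
 */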
7595 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7596 {
7597         struct phy_device *phydev = hdev->hw.mac.phydev;
7598         u16 remote_advertising = 0;
7599         u16 local_advertising = 0;
7600         u32 rx_pause, tx_pause;
7601         u8 flowctl;
7602
7603         if (!phydev->link || !phydev->autoneg)
7604                 return 0;
7605
7606         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7607
7608         if (phydev->pause)
7609                 remote_advertising = LPA_PAUSE_CAP;
7610
7611         if (phydev->asym_pause)
7612                 remote_advertising |= LPA_PAUSE_ASYM;
7613
7614         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7615                                            remote_advertising);
7616         tx_pause = flowctl & FLOW_CTRL_TX;
7617         rx_pause = flowctl & FLOW_CTRL_RX;
7618
7619         if (phydev->duplex == HCLGE_MAC_HALF) {
7620                 tx_pause = 0;
7621                 rx_pause = 0;
7622         }
7623
7624         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7625 }
7626
7627 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7628                                  u32 *rx_en, u32 *tx_en)
7629 {
7630         struct hclge_vport *vport = hclge_get_vport(handle);
7631         struct hclge_dev *hdev = vport->back;
7632
7633         *auto_neg = hclge_get_autoneg(handle);
7634
7635         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7636                 *rx_en = 0;
7637                 *tx_en = 0;
7638                 return;
7639         }
7640
7641         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7642                 *rx_en = 1;
7643                 *tx_en = 0;
7644         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7645                 *tx_en = 1;
7646                 *rx_en = 0;
7647         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7648                 *rx_en = 1;
7649                 *tx_en = 1;
7650         } else {
7651                 *rx_en = 0;
7652                 *tx_en = 0;
7653         }
7654 }
7655
7656 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7657                                 u32 rx_en, u32 tx_en)
7658 {
7659         struct hclge_vport *vport = hclge_get_vport(handle);
7660         struct hclge_dev *hdev = vport->back;
7661         struct phy_device *phydev = hdev->hw.mac.phydev;
7662         u32 fc_autoneg;
7663
7664         fc_autoneg = hclge_get_autoneg(handle);
7665         if (auto_neg != fc_autoneg) {
7666                 dev_info(&hdev->pdev->dev,
7667                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7668                 return -EOPNOTSUPP;
7669         }
7670
7671         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7672                 dev_info(&hdev->pdev->dev,
7673                          "Priority flow control enabled. Cannot set link flow control.\n");
7674                 return -EOPNOTSUPP;
7675         }
7676
7677         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7678
7679         if (!fc_autoneg)
7680                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7681
7682         if (phydev)
7683                 return phy_start_aneg(phydev);
7684
7685         if (hdev->pdev->revision == 0x20)
7686                 return -EOPNOTSUPP;
7687
7688         return hclge_restart_autoneg(handle);
7689 }
7690
7691 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7692                                           u8 *auto_neg, u32 *speed, u8 *duplex)
7693 {
7694         struct hclge_vport *vport = hclge_get_vport(handle);
7695         struct hclge_dev *hdev = vport->back;
7696
7697         if (speed)
7698                 *speed = hdev->hw.mac.speed;
7699         if (duplex)
7700                 *duplex = hdev->hw.mac.duplex;
7701         if (auto_neg)
7702                 *auto_neg = hdev->hw.mac.autoneg;
7703 }
7704
7705 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
7706                                  u8 *module_type)
7707 {
7708         struct hclge_vport *vport = hclge_get_vport(handle);
7709         struct hclge_dev *hdev = vport->back;
7710
7711         if (media_type)
7712                 *media_type = hdev->hw.mac.media_type;
7713
7714         if (module_type)
7715                 *module_type = hdev->hw.mac.module_type;
7716 }
7717
7718 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7719                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7720 {
7721         struct hclge_vport *vport = hclge_get_vport(handle);
7722         struct hclge_dev *hdev = vport->back;
7723         struct phy_device *phydev = hdev->hw.mac.phydev;
7724         int mdix_ctrl, mdix, retval, is_resolved;
7725
7726         if (!phydev) {
7727                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7728                 *tp_mdix = ETH_TP_MDI_INVALID;
7729                 return;
7730         }
7731
7732         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7733
7734         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7735         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7736                                     HCLGE_PHY_MDIX_CTRL_S);
7737
7738         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7739         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7740         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7741
7742         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7743
7744         switch (mdix_ctrl) {
7745         case 0x0:
7746                 *tp_mdix_ctrl = ETH_TP_MDI;
7747                 break;
7748         case 0x1:
7749                 *tp_mdix_ctrl = ETH_TP_MDI_X;
7750                 break;
7751         case 0x3:
7752                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7753                 break;
7754         default:
7755                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7756                 break;
7757         }
7758
7759         if (!is_resolved)
7760                 *tp_mdix = ETH_TP_MDI_INVALID;
7761         else if (mdix)
7762                 *tp_mdix = ETH_TP_MDI_X;
7763         else
7764                 *tp_mdix = ETH_TP_MDI;
7765 }
7766
7767 static void hclge_info_show(struct hclge_dev *hdev)
7768 {
7769         struct device *dev = &hdev->pdev->dev;
7770
7771         dev_info(dev, "PF info begin:\n");
7772
7773         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
7774         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
7775         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
7776         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
7777         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
7778         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
7779         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
7780         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
7781         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
7782         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
7783         dev_info(dev, "This is %s PF\n",
7784                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
7785         dev_info(dev, "DCB %s\n",
7786                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
7787         dev_info(dev, "MQPRIO %s\n",
7788                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
7789
7790         dev_info(dev, "PF info end.\n");
7791 }
7792
7793 static int hclge_init_client_instance(struct hnae3_client *client,
7794                                       struct hnae3_ae_dev *ae_dev)
7795 {
7796         struct hclge_dev *hdev = ae_dev->priv;
7797         struct hclge_vport *vport;
7798         int i, ret;
7799
7800         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
7801                 vport = &hdev->vport[i];
7802
7803                 switch (client->type) {
7804                 case HNAE3_CLIENT_KNIC:
7805
7806                         hdev->nic_client = client;
7807                         vport->nic.client = client;
7808                         ret = client->ops->init_instance(&vport->nic);
7809                         if (ret)
7810                                 goto clear_nic;
7811
7812                         hnae3_set_client_init_flag(client, ae_dev, 1);
7813
7814                         if (netif_msg_drv(&hdev->vport->nic))
7815                                 hclge_info_show(hdev);
7816
7817                         if (hdev->roce_client &&
7818                             hnae3_dev_roce_supported(hdev)) {
7819                                 struct hnae3_client *rc = hdev->roce_client;
7820
7821                                 ret = hclge_init_roce_base_info(vport);
7822                                 if (ret)
7823                                         goto clear_roce;
7824
7825                                 ret = rc->ops->init_instance(&vport->roce);
7826                                 if (ret)
7827                                         goto clear_roce;
7828
7829                                 hnae3_set_client_init_flag(hdev->roce_client,
7830                                                            ae_dev, 1);
7831                         }
7832
7833                         break;
7834                 case HNAE3_CLIENT_UNIC:
7835                         hdev->nic_client = client;
7836                         vport->nic.client = client;
7837
7838                         ret = client->ops->init_instance(&vport->nic);
7839                         if (ret)
7840                                 goto clear_nic;
7841
7842                         hnae3_set_client_init_flag(client, ae_dev, 1);
7843
7844                         break;
7845                 case HNAE3_CLIENT_ROCE:
7846                         if (hnae3_dev_roce_supported(hdev)) {
7847                                 hdev->roce_client = client;
7848                                 vport->roce.client = client;
7849                         }
7850
7851                         if (hdev->roce_client && hdev->nic_client) {
7852                                 ret = hclge_init_roce_base_info(vport);
7853                                 if (ret)
7854                                         goto clear_roce;
7855
7856                                 ret = client->ops->init_instance(&vport->roce);
7857                                 if (ret)
7858                                         goto clear_roce;
7859
7860                                 hnae3_set_client_init_flag(client, ae_dev, 1);
7861                         }
7862
7863                         break;
7864                 default:
7865                         return -EINVAL;
7866                 }
7867         }
7868
7869         return 0;
7870
7871 clear_nic:
7872         hdev->nic_client = NULL;
7873         vport->nic.client = NULL;
7874         return ret;
7875 clear_roce:
7876         hdev->roce_client = NULL;
7877         vport->roce.client = NULL;
7878         return ret;
7879 }
7880
7881 static void hclge_uninit_client_instance(struct hnae3_client *client,
7882                                          struct hnae3_ae_dev *ae_dev)
7883 {
7884         struct hclge_dev *hdev = ae_dev->priv;
7885         struct hclge_vport *vport;
7886         int i;
7887
7888         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7889                 vport = &hdev->vport[i];
7890                 if (hdev->roce_client) {
7891                         hdev->roce_client->ops->uninit_instance(&vport->roce,
7892                                                                 0);
7893                         hdev->roce_client = NULL;
7894                         vport->roce.client = NULL;
7895                 }
7896                 if (client->type == HNAE3_CLIENT_ROCE)
7897                         return;
7898                 if (hdev->nic_client && client->ops->uninit_instance) {
7899                         client->ops->uninit_instance(&vport->nic, 0);
7900                         hdev->nic_client = NULL;
7901                         vport->nic.client = NULL;
7902                 }
7903         }
7904 }
7905
7906 static int hclge_pci_init(struct hclge_dev *hdev)
7907 {
7908         struct pci_dev *pdev = hdev->pdev;
7909         struct hclge_hw *hw;
7910         int ret;
7911
7912         ret = pci_enable_device(pdev);
7913         if (ret) {
7914                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7915                 return ret;
7916         }
7917
7918         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7919         if (ret) {
7920                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7921                 if (ret) {
7922                         dev_err(&pdev->dev,
7923                                 "can't set consistent PCI DMA");
7924                         goto err_disable_device;
7925                 }
7926                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7927         }
7928
7929         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7930         if (ret) {
7931                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7932                 goto err_disable_device;
7933         }
7934
7935         pci_set_master(pdev);
7936         hw = &hdev->hw;
7937         hw->io_base = pcim_iomap(pdev, 2, 0);
7938         if (!hw->io_base) {
7939                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7940                 ret = -ENOMEM;
7941                 goto err_clr_master;
7942         }
7943
7944         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7945
7946         return 0;
7947 err_clr_master:
7948         pci_clear_master(pdev);
7949         pci_release_regions(pdev);
7950 err_disable_device:
7951         pci_disable_device(pdev);
7952
7953         return ret;
7954 }
7955
7956 static void hclge_pci_uninit(struct hclge_dev *hdev)
7957 {
7958         struct pci_dev *pdev = hdev->pdev;
7959
7960         pcim_iounmap(pdev, hdev->hw.io_base);
7961         pci_free_irq_vectors(pdev);
7962         pci_clear_master(pdev);
7963         pci_release_mem_regions(pdev);
7964         pci_disable_device(pdev);
7965 }
7966
7967 static void hclge_state_init(struct hclge_dev *hdev)
7968 {
7969         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7970         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7971         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7972         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7973         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7974         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7975 }
7976
7977 static void hclge_state_uninit(struct hclge_dev *hdev)
7978 {
7979         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7980
7981         if (hdev->service_timer.function)
7982                 del_timer_sync(&hdev->service_timer);
7983         if (hdev->reset_timer.function)
7984                 del_timer_sync(&hdev->reset_timer);
7985         if (hdev->service_task.func)
7986                 cancel_work_sync(&hdev->service_task);
7987         if (hdev->rst_service_task.func)
7988                 cancel_work_sync(&hdev->rst_service_task);
7989         if (hdev->mbx_service_task.func)
7990                 cancel_work_sync(&hdev->mbx_service_task);
7991 }
7992
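/* Prepare for an FLR: request an FLR reset through the reset task and wait
 * up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds) for the function
 * to be brought down before the FLR proceeds.
 */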
7993 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7994 {
7995 #define HCLGE_FLR_WAIT_MS       100
7996 #define HCLGE_FLR_WAIT_CNT      50
7997         struct hclge_dev *hdev = ae_dev->priv;
7998         int cnt = 0;
7999
8000         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8001         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8002         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8003         hclge_reset_event(hdev->pdev, NULL);
8004
8005         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8006                cnt++ < HCLGE_FLR_WAIT_CNT)
8007                 msleep(HCLGE_FLR_WAIT_MS);
8008
8009         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8010                 dev_err(&hdev->pdev->dev,
8011                         "flr wait down timeout: %d\n", cnt);
8012 }
8013
8014 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8015 {
8016         struct hclge_dev *hdev = ae_dev->priv;
8017
8018         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8019 }
8020
8021 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8022 {
8023         struct pci_dev *pdev = ae_dev->pdev;
8024         struct hclge_dev *hdev;
8025         int ret;
8026
8027         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8028         if (!hdev) {
8029                 ret = -ENOMEM;
8030                 goto out;
8031         }
8032
8033         hdev->pdev = pdev;
8034         hdev->ae_dev = ae_dev;
8035         hdev->reset_type = HNAE3_NONE_RESET;
8036         hdev->reset_level = HNAE3_FUNC_RESET;
8037         ae_dev->priv = hdev;
8038         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8039
8040         mutex_init(&hdev->vport_lock);
8041         mutex_init(&hdev->vport_cfg_mutex);
8042
8043         ret = hclge_pci_init(hdev);
8044         if (ret) {
8045                 dev_err(&pdev->dev, "PCI init failed\n");
8046                 goto out;
8047         }
8048
8049         /* Initialize the firmware command queue */
8050         ret = hclge_cmd_queue_init(hdev);
8051         if (ret) {
8052                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8053                 goto err_pci_uninit;
8054         }
8055
8056         /* Initialize the firmware command interface */
8057         ret = hclge_cmd_init(hdev);
8058         if (ret)
8059                 goto err_cmd_uninit;
8060
8061         ret = hclge_get_cap(hdev);
8062         if (ret) {
8063                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8064                         ret);
8065                 goto err_cmd_uninit;
8066         }
8067
8068         ret = hclge_configure(hdev);
8069         if (ret) {
8070                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8071                 goto err_cmd_uninit;
8072         }
8073
8074         ret = hclge_init_msi(hdev);
8075         if (ret) {
8076                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8077                 goto err_cmd_uninit;
8078         }
8079
8080         ret = hclge_misc_irq_init(hdev);
8081         if (ret) {
8082                 dev_err(&pdev->dev,
8083                         "Misc IRQ(vector0) init error, ret = %d.\n",
8084                         ret);
8085                 goto err_msi_uninit;
8086         }
8087
8088         ret = hclge_alloc_tqps(hdev);
8089         if (ret) {
8090                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8091                 goto err_msi_irq_uninit;
8092         }
8093
8094         ret = hclge_alloc_vport(hdev);
8095         if (ret) {
8096                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8097                 goto err_msi_irq_uninit;
8098         }
8099
8100         ret = hclge_map_tqp(hdev);
8101         if (ret) {
8102                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8103                 goto err_msi_irq_uninit;
8104         }
8105
8106         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8107                 ret = hclge_mac_mdio_config(hdev);
8108                 if (ret) {
8109                         dev_err(&hdev->pdev->dev,
8110                                 "mdio config fail ret=%d\n", ret);
8111                         goto err_msi_irq_uninit;
8112                 }
8113         }
8114
8115         ret = hclge_init_umv_space(hdev);
8116         if (ret) {
8117                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8118                 goto err_mdiobus_unreg;
8119         }
8120
8121         ret = hclge_mac_init(hdev);
8122         if (ret) {
8123                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8124                 goto err_mdiobus_unreg;
8125         }
8126
8127         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8128         if (ret) {
8129                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8130                 goto err_mdiobus_unreg;
8131         }
8132
8133         ret = hclge_config_gro(hdev, true);
8134         if (ret)
8135                 goto err_mdiobus_unreg;
8136
8137         ret = hclge_init_vlan_config(hdev);
8138         if (ret) {
8139                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8140                 goto err_mdiobus_unreg;
8141         }
8142
8143         ret = hclge_tm_schd_init(hdev);
8144         if (ret) {
8145                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8146                 goto err_mdiobus_unreg;
8147         }
8148
8149         hclge_rss_init_cfg(hdev);
8150         ret = hclge_rss_init_hw(hdev);
8151         if (ret) {
8152                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8153                 goto err_mdiobus_unreg;
8154         }
8155
8156         ret = init_mgr_tbl(hdev);
8157         if (ret) {
8158                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8159                 goto err_mdiobus_unreg;
8160         }
8161
8162         ret = hclge_init_fd_config(hdev);
8163         if (ret) {
8164                 dev_err(&pdev->dev,
8165                         "fd table init fail, ret=%d\n", ret);
8166                 goto err_mdiobus_unreg;
8167         }
8168
8169         ret = hclge_hw_error_set_state(hdev, true);
8170         if (ret) {
8171                 dev_err(&pdev->dev,
8172                         "fail(%d) to enable hw error interrupts\n", ret);
8173                 goto err_mdiobus_unreg;
8174         }
8175
8176         INIT_KFIFO(hdev->mac_tnl_log);
8177
8178         hclge_dcb_ops_set(hdev);
8179
8180         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8181         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8182         INIT_WORK(&hdev->service_task, hclge_service_task);
8183         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8184         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8185
8186         hclge_clear_all_event_cause(hdev);
8187
8188         /* Enable MISC vector(vector0) */
8189         hclge_enable_vector(&hdev->misc_vector, true);
8190
8191         hclge_state_init(hdev);
8192         hdev->last_reset_time = jiffies;
8193
8194         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8195         return 0;
8196
8197 err_mdiobus_unreg:
8198         if (hdev->hw.mac.phydev)
8199                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8200 err_msi_irq_uninit:
8201         hclge_misc_irq_uninit(hdev);
8202 err_msi_uninit:
8203         pci_free_irq_vectors(pdev);
8204 err_cmd_uninit:
8205         hclge_cmd_uninit(hdev);
8206 err_pci_uninit:
8207         pcim_iounmap(pdev, hdev->hw.io_base);
8208         pci_clear_master(pdev);
8209         pci_release_regions(pdev);
8210         pci_disable_device(pdev);
8211 out:
8212         return ret;
8213 }
8214
8215 static void hclge_stats_clear(struct hclge_dev *hdev)
8216 {
8217         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8218 }
8219
8220 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8221 {
8222         struct hclge_vport *vport = hdev->vport;
8223         int i;
8224
8225         for (i = 0; i < hdev->num_alloc_vport; i++) {
8226                 hclge_vport_stop(vport);
8227                 vport++;
8228         }
8229 }
8230
8231 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8232 {
8233         struct hclge_dev *hdev = ae_dev->priv;
8234         struct pci_dev *pdev = ae_dev->pdev;
8235         int ret;
8236
8237         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8238
8239         hclge_stats_clear(hdev);
8240         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8241
8242         ret = hclge_cmd_init(hdev);
8243         if (ret) {
8244                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8245                 return ret;
8246         }
8247
8248         ret = hclge_map_tqp(hdev);
8249         if (ret) {
8250                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8251                 return ret;
8252         }
8253
8254         hclge_reset_umv_space(hdev);
8255
8256         ret = hclge_mac_init(hdev);
8257         if (ret) {
8258                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8259                 return ret;
8260         }
8261
8262         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8263         if (ret) {
8264                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8265                 return ret;
8266         }
8267
8268         ret = hclge_config_gro(hdev, true);
8269         if (ret)
8270                 return ret;
8271
8272         ret = hclge_init_vlan_config(hdev);
8273         if (ret) {
8274                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8275                 return ret;
8276         }
8277
8278         ret = hclge_tm_init_hw(hdev, true);
8279         if (ret) {
8280                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8281                 return ret;
8282         }
8283
8284         ret = hclge_rss_init_hw(hdev);
8285         if (ret) {
8286                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8287                 return ret;
8288         }
8289
8290         ret = hclge_init_fd_config(hdev);
8291         if (ret) {
8292                 dev_err(&pdev->dev,
8293                         "fd table init fail, ret=%d\n", ret);
8294                 return ret;
8295         }
8296
8297         /* Re-enable the hw error interrupts because
8298          * the interrupts get disabled on core/global reset.
8299          */
8300         ret = hclge_hw_error_set_state(hdev, true);
8301         if (ret) {
8302                 dev_err(&pdev->dev,
8303                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8304                 return ret;
8305         }
8306
8307         hclge_reset_vport_state(hdev);
8308
8309         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8310                  HCLGE_DRIVER_NAME);
8311
8312         return 0;
8313 }
8314
8315 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8316 {
8317         struct hclge_dev *hdev = ae_dev->priv;
8318         struct hclge_mac *mac = &hdev->hw.mac;
8319
8320         hclge_state_uninit(hdev);
8321
8322         if (mac->phydev)
8323                 mdiobus_unregister(mac->mdio_bus);
8324
8325         hclge_uninit_umv_space(hdev);
8326
8327         /* Disable MISC vector(vector0) */
8328         hclge_enable_vector(&hdev->misc_vector, false);
8329         synchronize_irq(hdev->misc_vector.vector_irq);
8330
8331         hclge_config_mac_tnl_int(hdev, false);
8332         hclge_hw_error_set_state(hdev, false);
8333         hclge_cmd_uninit(hdev);
8334         hclge_misc_irq_uninit(hdev);
8335         hclge_pci_uninit(hdev);
8336         mutex_destroy(&hdev->vport_lock);
8337         hclge_uninit_vport_mac_table(hdev);
8338         hclge_uninit_vport_vlan_table(hdev);
8339         mutex_destroy(&hdev->vport_cfg_mutex);
8340         ae_dev->priv = NULL;
8341 }
8342
8343 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8344 {
8345         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8346         struct hclge_vport *vport = hclge_get_vport(handle);
8347         struct hclge_dev *hdev = vport->back;
8348
8349         return min_t(u32, hdev->rss_size_max,
8350                      vport->alloc_tqps / kinfo->num_tc);
8351 }
8352
8353 static void hclge_get_channels(struct hnae3_handle *handle,
8354                                struct ethtool_channels *ch)
8355 {
8356         ch->max_combined = hclge_get_max_channels(handle);
8357         ch->other_count = 1;
8358         ch->max_other = 1;
8359         ch->combined_count = handle->kinfo.rss_size;
8360 }
8361
8362 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8363                                         u16 *alloc_tqps, u16 *max_rss_size)
8364 {
8365         struct hclge_vport *vport = hclge_get_vport(handle);
8366         struct hclge_dev *hdev = vport->back;
8367
8368         *alloc_tqps = vport->alloc_tqps;
8369         *max_rss_size = hdev->rss_size_max;
8370 }
8371
8372 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8373                               bool rxfh_configured)
8374 {
8375         struct hclge_vport *vport = hclge_get_vport(handle);
8376         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8377         struct hclge_dev *hdev = vport->back;
8378         int cur_rss_size = kinfo->rss_size;
8379         int cur_tqps = kinfo->num_tqps;
8380         u16 tc_offset[HCLGE_MAX_TC_NUM];
8381         u16 tc_valid[HCLGE_MAX_TC_NUM];
8382         u16 tc_size[HCLGE_MAX_TC_NUM];
8383         u16 roundup_size;
8384         u32 *rss_indir;
8385         int ret, i;
8386
8387         kinfo->req_rss_size = new_tqps_num;
8388
8389         ret = hclge_tm_vport_map_update(hdev);
8390         if (ret) {
8391                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8392                 return ret;
8393         }
8394
8395         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8396         roundup_size = ilog2(roundup_size);
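        /* e.g. an rss_size of 10 rounds up to 16, so every enabled TC gets a
         * tc_size of ilog2(16) = 4
         */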
8397         /* Set the RSS TC mode according to the new RSS size */
8398         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8399                 tc_valid[i] = 0;
8400
8401                 if (!(hdev->hw_tc_map & BIT(i)))
8402                         continue;
8403
8404                 tc_valid[i] = 1;
8405                 tc_size[i] = roundup_size;
8406                 tc_offset[i] = kinfo->rss_size * i;
8407         }
8408         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8409         if (ret)
8410                 return ret;
8411
8412         /* RSS indirection table has been configured by user */
8413         if (rxfh_configured)
8414                 goto out;
8415
8416         /* Reinitialize the RSS indirection table according to the new RSS size */
8417         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8418         if (!rss_indir)
8419                 return -ENOMEM;
8420
8421         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8422                 rss_indir[i] = i % kinfo->rss_size;
8423
8424         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8425         if (ret)
8426                 dev_err(&hdev->pdev->dev, "set rss indir table failed, ret = %d\n",
8427                         ret);
8428
8429         kfree(rss_indir);
8430
8431 out:
8432         if (!ret)
8433                 dev_info(&hdev->pdev->dev,
8434                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8435                          cur_rss_size, kinfo->rss_size,
8436                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8437
8438         return ret;
8439 }
8440
8441 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8442                               u32 *regs_num_64_bit)
8443 {
8444         struct hclge_desc desc;
8445         u32 total_num;
8446         int ret;
8447
8448         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8449         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8450         if (ret) {
8451                 dev_err(&hdev->pdev->dev,
8452                         "Query register number cmd failed, ret = %d.\n", ret);
8453                 return ret;
8454         }
8455
8456         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8457         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8458
8459         total_num = *regs_num_32_bit + *regs_num_64_bit;
8460         if (!total_num)
8461                 return -EINVAL;
8462
8463         return 0;
8464 }
8465
8466 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8467                                  void *data)
8468 {
8469 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8470
8471         struct hclge_desc *desc;
8472         u32 *reg_val = data;
8473         __le32 *desc_data;
8474         int cmd_num;
8475         int i, k, n;
8476         int ret;
8477
8478         if (regs_num == 0)
8479                 return 0;
8480
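             /*
              * Descriptors after the first reuse their header words for data
              * (see the i != 0 cast below), while the first one only returns
              * HCLGE_32_BIT_REG_RTN_DATANUM - 2 values; the "+ 2" accounts
              * for that when sizing the command.
              */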
8481         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8482         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8483         if (!desc)
8484                 return -ENOMEM;
8485
8486         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8487         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8488         if (ret) {
8489                 dev_err(&hdev->pdev->dev,
8490                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8491                 kfree(desc);
8492                 return ret;
8493         }
8494
8495         for (i = 0; i < cmd_num; i++) {
8496                 if (i == 0) {
8497                         desc_data = (__le32 *)(&desc[i].data[0]);
8498                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8499                 } else {
8500                         desc_data = (__le32 *)(&desc[i]);
8501                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8502                 }
8503                 for (k = 0; k < n; k++) {
8504                         *reg_val++ = le32_to_cpu(*desc_data++);
8505
8506                         regs_num--;
8507                         if (!regs_num)
8508                                 break;
8509                 }
8510         }
8511
8512         kfree(desc);
8513         return 0;
8514 }
8515
8516 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8517                                  void *data)
8518 {
8519 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8520
8521         struct hclge_desc *desc;
8522         u64 *reg_val = data;
8523         __le64 *desc_data;
8524         int cmd_num;
8525         int i, k, n;
8526         int ret;
8527
8528         if (regs_num == 0)
8529                 return 0;
8530
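             /*
              * As with the 32-bit query, the first descriptor returns one
              * fewer 64-bit value than the rest (see the i == 0 case below),
              * hence the "+ 1" when sizing the command.
              */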
8531         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8532         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8533         if (!desc)
8534                 return -ENOMEM;
8535
8536         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8537         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8538         if (ret) {
8539                 dev_err(&hdev->pdev->dev,
8540                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8541                 kfree(desc);
8542                 return ret;
8543         }
8544
8545         for (i = 0; i < cmd_num; i++) {
8546                 if (i == 0) {
8547                         desc_data = (__le64 *)(&desc[i].data[0]);
8548                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8549                 } else {
8550                         desc_data = (__le64 *)(&desc[i]);
8551                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8552                 }
8553                 for (k = 0; k < n; k++) {
8554                         *reg_val++ = le64_to_cpu(*desc_data++);
8555
8556                         regs_num--;
8557                         if (!regs_num)
8558                                 break;
8559                 }
8560         }
8561
8562         kfree(desc);
8563         return 0;
8564 }
8565
8566 #define MAX_SEPARATE_NUM        4
8567 #define SEPARATOR_VALUE         0xFFFFFFFF
8568 #define REG_NUM_PER_LINE        4
8569 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8570
8571 static int hclge_get_regs_len(struct hnae3_handle *handle)
8572 {
8573         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8574         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8575         struct hclge_vport *vport = hclge_get_vport(handle);
8576         struct hclge_dev *hdev = vport->back;
8577         u32 regs_num_32_bit, regs_num_64_bit;
8578         int ret;
8579
8580         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8581         if (ret) {
8582                 dev_err(&hdev->pdev->dev,
8583                         "Get register number failed, ret = %d.\n", ret);
8584                 return -EOPNOTSUPP;
8585         }
8586
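             /*
              * hclge_get_regs() pads each register block with SEPARATOR_VALUE
              * words up to a full line, so reserve one extra line per block.
              */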
8587         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8588         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8589         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8590         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8591
8592         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8593                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8594                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8595 }
8596
8597 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8598                            void *data)
8599 {
8600         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8601         struct hclge_vport *vport = hclge_get_vport(handle);
8602         struct hclge_dev *hdev = vport->back;
8603         u32 regs_num_32_bit, regs_num_64_bit;
8604         int i, j, reg_um, separator_num;
8605         u32 *reg = data;
8606         int ret;
8607
8608         *version = hdev->fw_version;
8609
8610         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8611         if (ret) {
8612                 dev_err(&hdev->pdev->dev,
8613                         "Get register number failed, ret = %d.\n", ret);
8614                 return;
8615         }
8616
8617         /* fetch per-PF register values from the PF PCIe register space */
8618         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8619         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8620         for (i = 0; i < reg_um; i++)
8621                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8622         for (i = 0; i < separator_num; i++)
8623                 *reg++ = SEPARATOR_VALUE;
8624
8625         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8626         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8627         for (i = 0; i < reg_um; i++)
8628                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8629         for (i = 0; i < separator_num; i++)
8630                 *reg++ = SEPARATOR_VALUE;
8631
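             /*
              * Ring registers are dumped once per TQP; the 0x200 offset used
              * below is assumed to be the per-queue register stride.
              */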
8632         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8633         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8634         for (j = 0; j < kinfo->num_tqps; j++) {
8635                 for (i = 0; i < reg_um; i++)
8636                         *reg++ = hclge_read_dev(&hdev->hw,
8637                                                 ring_reg_addr_list[i] +
8638                                                 0x200 * j);
8639                 for (i = 0; i < separator_num; i++)
8640                         *reg++ = SEPARATOR_VALUE;
8641         }
8642
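             /*
              * TQP interrupt registers are dumped for each in-use MSI-X
              * vector except the misc vector (vector0); the 4-byte step below
              * is assumed to be the per-vector register stride.
              */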
8643         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8644         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8645         for (j = 0; j < hdev->num_msi_used - 1; j++) {
8646                 for (i = 0; i < reg_um; i++)
8647                         *reg++ = hclge_read_dev(&hdev->hw,
8648                                                 tqp_intr_reg_addr_list[i] +
8649                                                 4 * j);
8650                 for (i = 0; i < separator_num; i++)
8651                         *reg++ = SEPARATOR_VALUE;
8652         }
8653
8654         /* fetch PF common register values from firmware */
8655         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8656         if (ret) {
8657                 dev_err(&hdev->pdev->dev,
8658                         "Get 32 bit register failed, ret = %d.\n", ret);
8659                 return;
8660         }
8661
8662         reg += regs_num_32_bit;
8663         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8664         if (ret)
8665                 dev_err(&hdev->pdev->dev,
8666                         "Get 64 bit register failed, ret = %d.\n", ret);
8667 }
8668
8669 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8670 {
8671         struct hclge_set_led_state_cmd *req;
8672         struct hclge_desc desc;
8673         int ret;
8674
8675         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8676
8677         req = (struct hclge_set_led_state_cmd *)desc.data;
8678         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8679                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8680
8681         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8682         if (ret)
8683                 dev_err(&hdev->pdev->dev,
8684                         "Send set led state cmd error, ret = %d\n", ret);
8685
8686         return ret;
8687 }
8688
8689 enum hclge_led_status {
8690         HCLGE_LED_OFF,
8691         HCLGE_LED_ON,
8692         HCLGE_LED_NO_CHANGE = 0xFF,
8693 };
8694
8695 static int hclge_set_led_id(struct hnae3_handle *handle,
8696                             enum ethtool_phys_id_state status)
8697 {
8698         struct hclge_vport *vport = hclge_get_vport(handle);
8699         struct hclge_dev *hdev = vport->back;
8700
8701         switch (status) {
8702         case ETHTOOL_ID_ACTIVE:
8703                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
8704         case ETHTOOL_ID_INACTIVE:
8705                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8706         default:
8707                 return -EINVAL;
8708         }
8709 }
8710
8711 static void hclge_get_link_mode(struct hnae3_handle *handle,
8712                                 unsigned long *supported,
8713                                 unsigned long *advertising)
8714 {
8715         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8716         struct hclge_vport *vport = hclge_get_vport(handle);
8717         struct hclge_dev *hdev = vport->back;
8718         unsigned int idx = 0;
8719
8720         for (; idx < size; idx++) {
8721                 supported[idx] = hdev->hw.mac.supported[idx];
8722                 advertising[idx] = hdev->hw.mac.advertising[idx];
8723         }
8724 }
8725
8726 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8727 {
8728         struct hclge_vport *vport = hclge_get_vport(handle);
8729         struct hclge_dev *hdev = vport->back;
8730
8731         return hclge_config_gro(hdev, enable);
8732 }
8733
8734 static const struct hnae3_ae_ops hclge_ops = {
8735         .init_ae_dev = hclge_init_ae_dev,
8736         .uninit_ae_dev = hclge_uninit_ae_dev,
8737         .flr_prepare = hclge_flr_prepare,
8738         .flr_done = hclge_flr_done,
8739         .init_client_instance = hclge_init_client_instance,
8740         .uninit_client_instance = hclge_uninit_client_instance,
8741         .map_ring_to_vector = hclge_map_ring_to_vector,
8742         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8743         .get_vector = hclge_get_vector,
8744         .put_vector = hclge_put_vector,
8745         .set_promisc_mode = hclge_set_promisc_mode,
8746         .set_loopback = hclge_set_loopback,
8747         .start = hclge_ae_start,
8748         .stop = hclge_ae_stop,
8749         .client_start = hclge_client_start,
8750         .client_stop = hclge_client_stop,
8751         .get_status = hclge_get_status,
8752         .get_ksettings_an_result = hclge_get_ksettings_an_result,
8753         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8754         .get_media_type = hclge_get_media_type,
8755         .check_port_speed = hclge_check_port_speed,
8756         .get_rss_key_size = hclge_get_rss_key_size,
8757         .get_rss_indir_size = hclge_get_rss_indir_size,
8758         .get_rss = hclge_get_rss,
8759         .set_rss = hclge_set_rss,
8760         .set_rss_tuple = hclge_set_rss_tuple,
8761         .get_rss_tuple = hclge_get_rss_tuple,
8762         .get_tc_size = hclge_get_tc_size,
8763         .get_mac_addr = hclge_get_mac_addr,
8764         .set_mac_addr = hclge_set_mac_addr,
8765         .do_ioctl = hclge_do_ioctl,
8766         .add_uc_addr = hclge_add_uc_addr,
8767         .rm_uc_addr = hclge_rm_uc_addr,
8768         .add_mc_addr = hclge_add_mc_addr,
8769         .rm_mc_addr = hclge_rm_mc_addr,
8770         .set_autoneg = hclge_set_autoneg,
8771         .get_autoneg = hclge_get_autoneg,
8772         .restart_autoneg = hclge_restart_autoneg,
8773         .get_pauseparam = hclge_get_pauseparam,
8774         .set_pauseparam = hclge_set_pauseparam,
8775         .set_mtu = hclge_set_mtu,
8776         .reset_queue = hclge_reset_tqp,
8777         .get_stats = hclge_get_stats,
8778         .get_mac_pause_stats = hclge_get_mac_pause_stat,
8779         .update_stats = hclge_update_stats,
8780         .get_strings = hclge_get_strings,
8781         .get_sset_count = hclge_get_sset_count,
8782         .get_fw_version = hclge_get_fw_version,
8783         .get_mdix_mode = hclge_get_mdix_mode,
8784         .enable_vlan_filter = hclge_enable_vlan_filter,
8785         .set_vlan_filter = hclge_set_vlan_filter,
8786         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8787         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8788         .reset_event = hclge_reset_event,
8789         .set_default_reset_request = hclge_set_def_reset_request,
8790         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8791         .set_channels = hclge_set_channels,
8792         .get_channels = hclge_get_channels,
8793         .get_regs_len = hclge_get_regs_len,
8794         .get_regs = hclge_get_regs,
8795         .set_led_id = hclge_set_led_id,
8796         .get_link_mode = hclge_get_link_mode,
8797         .add_fd_entry = hclge_add_fd_entry,
8798         .del_fd_entry = hclge_del_fd_entry,
8799         .del_all_fd_entries = hclge_del_all_fd_entries,
8800         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8801         .get_fd_rule_info = hclge_get_fd_rule_info,
8802         .get_fd_all_rules = hclge_get_all_rules,
8803         .restore_fd_rules = hclge_restore_fd_entries,
8804         .enable_fd = hclge_enable_fd,
8805         .dbg_run_cmd = hclge_dbg_run_cmd,
8806         .handle_hw_ras_error = hclge_handle_hw_ras_error,
8807         .get_hw_reset_stat = hclge_get_hw_reset_stat,
8808         .ae_dev_resetting = hclge_ae_dev_resetting,
8809         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8810         .set_gro_en = hclge_gro_en,
8811         .get_global_queue_id = hclge_covert_handle_qid_global,
8812         .set_timer_task = hclge_set_timer_task,
8813         .mac_connect_phy = hclge_mac_connect_phy,
8814         .mac_disconnect_phy = hclge_mac_disconnect_phy,
8815 };
8816
8817 static struct hnae3_ae_algo ae_algo = {
8818         .ops = &hclge_ops,
8819         .pdev_id_table = ae_algo_pci_tbl,
8820 };
8821
8822 static int hclge_init(void)
8823 {
8824         pr_info("%s is initializing\n", HCLGE_NAME);
8825
8826         hnae3_register_ae_algo(&ae_algo);
8827
8828         return 0;
8829 }
8830
8831 static void hclge_exit(void)
8832 {
8833         hnae3_unregister_ae_algo(&ae_algo);
8834 }
8835 module_init(hclge_init);
8836 module_exit(hclge_exit);
8837
8838 MODULE_LICENSE("GPL");
8839 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8840 MODULE_DESCRIPTION("HCLGE Driver");
8841 MODULE_VERSION(HCLGE_MOD_VERSION);