net: hns3: fix some coding style issues
[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33
34 #define HCLGE_RESET_MAX_FAIL_CNT        5
35
36 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
40 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
41                                u16 *allocated_size, bool is_alloc);
42 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
43 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
44
45 static struct hnae3_ae_algo ae_algo;
46
47 static const struct pci_device_id ae_algo_pci_tbl[] = {
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
51         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
53         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
55         /* required last entry */
56         {0, }
57 };
58
59 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
60
61 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
62                                          HCLGE_CMDQ_TX_ADDR_H_REG,
63                                          HCLGE_CMDQ_TX_DEPTH_REG,
64                                          HCLGE_CMDQ_TX_TAIL_REG,
65                                          HCLGE_CMDQ_TX_HEAD_REG,
66                                          HCLGE_CMDQ_RX_ADDR_L_REG,
67                                          HCLGE_CMDQ_RX_ADDR_H_REG,
68                                          HCLGE_CMDQ_RX_DEPTH_REG,
69                                          HCLGE_CMDQ_RX_TAIL_REG,
70                                          HCLGE_CMDQ_RX_HEAD_REG,
71                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
72                                          HCLGE_CMDQ_INTR_STS_REG,
73                                          HCLGE_CMDQ_INTR_EN_REG,
74                                          HCLGE_CMDQ_INTR_GEN_REG};
75
76 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
77                                            HCLGE_VECTOR0_OTER_EN_REG,
78                                            HCLGE_MISC_RESET_STS_REG,
79                                            HCLGE_MISC_VECTOR_INT_STS,
80                                            HCLGE_GLOBAL_RESET_REG,
81                                            HCLGE_FUN_RST_ING,
82                                            HCLGE_GRO_EN_REG};
83
84 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
85                                          HCLGE_RING_RX_ADDR_H_REG,
86                                          HCLGE_RING_RX_BD_NUM_REG,
87                                          HCLGE_RING_RX_BD_LENGTH_REG,
88                                          HCLGE_RING_RX_MERGE_EN_REG,
89                                          HCLGE_RING_RX_TAIL_REG,
90                                          HCLGE_RING_RX_HEAD_REG,
91                                          HCLGE_RING_RX_FBD_NUM_REG,
92                                          HCLGE_RING_RX_OFFSET_REG,
93                                          HCLGE_RING_RX_FBD_OFFSET_REG,
94                                          HCLGE_RING_RX_STASH_REG,
95                                          HCLGE_RING_RX_BD_ERR_REG,
96                                          HCLGE_RING_TX_ADDR_L_REG,
97                                          HCLGE_RING_TX_ADDR_H_REG,
98                                          HCLGE_RING_TX_BD_NUM_REG,
99                                          HCLGE_RING_TX_PRIORITY_REG,
100                                          HCLGE_RING_TX_TC_REG,
101                                          HCLGE_RING_TX_MERGE_EN_REG,
102                                          HCLGE_RING_TX_TAIL_REG,
103                                          HCLGE_RING_TX_HEAD_REG,
104                                          HCLGE_RING_TX_FBD_NUM_REG,
105                                          HCLGE_RING_TX_OFFSET_REG,
106                                          HCLGE_RING_TX_EBD_NUM_REG,
107                                          HCLGE_RING_TX_EBD_OFFSET_REG,
108                                          HCLGE_RING_TX_BD_ERR_REG,
109                                          HCLGE_RING_EN_REG};
110
111 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
112                                              HCLGE_TQP_INTR_GL0_REG,
113                                              HCLGE_TQP_INTR_GL1_REG,
114                                              HCLGE_TQP_INTR_GL2_REG,
115                                              HCLGE_TQP_INTR_RL_REG};
116
117 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
118         "App    Loopback test",
119         "Serdes serial Loopback test",
120         "Serdes parallel Loopback test",
121         "Phy    Loopback test"
122 };
123
124 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
125         {"mac_tx_mac_pause_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
127         {"mac_rx_mac_pause_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
129         {"mac_tx_control_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
131         {"mac_rx_control_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
133         {"mac_tx_pfc_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
135         {"mac_tx_pfc_pri0_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
137         {"mac_tx_pfc_pri1_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
139         {"mac_tx_pfc_pri2_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
141         {"mac_tx_pfc_pri3_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
143         {"mac_tx_pfc_pri4_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
145         {"mac_tx_pfc_pri5_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
147         {"mac_tx_pfc_pri6_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
149         {"mac_tx_pfc_pri7_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
151         {"mac_rx_pfc_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
153         {"mac_rx_pfc_pri0_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
155         {"mac_rx_pfc_pri1_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
157         {"mac_rx_pfc_pri2_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
159         {"mac_rx_pfc_pri3_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
161         {"mac_rx_pfc_pri4_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
163         {"mac_rx_pfc_pri5_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
165         {"mac_rx_pfc_pri6_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
167         {"mac_rx_pfc_pri7_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
169         {"mac_tx_total_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
171         {"mac_tx_total_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
173         {"mac_tx_good_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
175         {"mac_tx_bad_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
177         {"mac_tx_good_oct_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
179         {"mac_tx_bad_oct_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
181         {"mac_tx_uni_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
183         {"mac_tx_multi_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
185         {"mac_tx_broad_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
187         {"mac_tx_undersize_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
189         {"mac_tx_oversize_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
191         {"mac_tx_64_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
193         {"mac_tx_65_127_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
195         {"mac_tx_128_255_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
197         {"mac_tx_256_511_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
199         {"mac_tx_512_1023_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
201         {"mac_tx_1024_1518_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
203         {"mac_tx_1519_2047_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
205         {"mac_tx_2048_4095_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
207         {"mac_tx_4096_8191_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
209         {"mac_tx_8192_9216_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
211         {"mac_tx_9217_12287_oct_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
213         {"mac_tx_12288_16383_oct_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
215         {"mac_tx_1519_max_good_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
217         {"mac_tx_1519_max_bad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
219         {"mac_rx_total_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
221         {"mac_rx_total_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
223         {"mac_rx_good_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
225         {"mac_rx_bad_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
227         {"mac_rx_good_oct_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
229         {"mac_rx_bad_oct_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
231         {"mac_rx_uni_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
233         {"mac_rx_multi_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
235         {"mac_rx_broad_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
237         {"mac_rx_undersize_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
239         {"mac_rx_oversize_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
241         {"mac_rx_64_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
243         {"mac_rx_65_127_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
245         {"mac_rx_128_255_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
247         {"mac_rx_256_511_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
249         {"mac_rx_512_1023_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
251         {"mac_rx_1024_1518_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
253         {"mac_rx_1519_2047_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
255         {"mac_rx_2048_4095_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
257         {"mac_rx_4096_8191_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
259         {"mac_rx_8192_9216_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
261         {"mac_rx_9217_12287_oct_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
263         {"mac_rx_12288_16383_oct_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
265         {"mac_rx_1519_max_good_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
267         {"mac_rx_1519_max_bad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
269
270         {"mac_tx_fragment_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
272         {"mac_tx_undermin_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
274         {"mac_tx_jabber_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
276         {"mac_tx_err_all_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
278         {"mac_tx_from_app_good_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
280         {"mac_tx_from_app_bad_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
282         {"mac_rx_fragment_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
284         {"mac_rx_undermin_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
286         {"mac_rx_jabber_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
288         {"mac_rx_fcs_err_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
290         {"mac_rx_send_app_good_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
292         {"mac_rx_send_app_bad_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
294 };
295
296 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
297         {
298                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
299                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
300                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
301                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
302                 .i_port_bitmap = 0x1,
303         },
304 };
305
306 static const u8 hclge_hash_key[] = {
307         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
308         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
309         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
310         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
311         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
312 };
313
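/* Legacy statistics read path: older firmware only understands the fixed
 * HCLGE_OPC_STATS_MAC command, which always returns HCLGE_MAC_CMD_NUM
 * descriptors. Firmware that reports its own descriptor count is handled
 * by hclge_mac_update_stats_complete() below.
 */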
314 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
315 {
316 #define HCLGE_MAC_CMD_NUM 21
317
318         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
319         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
320         __le64 *desc_data;
321         int i, k, n;
322         int ret;
323
324         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
325         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
326         if (ret) {
327                 dev_err(&hdev->pdev->dev,
328                         "Get MAC pkt stats fail, status = %d.\n", ret);
329
330                 return ret;
331         }
332
333         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
334                 /* for special opcode 0032, only the first desc has the head */
335                 if (unlikely(i == 0)) {
336                         desc_data = (__le64 *)(&desc[i].data[0]);
337                         n = HCLGE_RD_FIRST_STATS_NUM;
338                 } else {
339                         desc_data = (__le64 *)(&desc[i]);
340                         n = HCLGE_RD_OTHER_STATS_NUM;
341                 }
342
343                 for (k = 0; k < n; k++) {
344                         *data += le64_to_cpu(*desc_data);
345                         data++;
346                         desc_data++;
347                 }
348         }
349
350         return 0;
351 }
352
353 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
354 {
355         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
356         struct hclge_desc *desc;
357         __le64 *desc_data;
358         u16 i, k, n;
359         int ret;
360
361         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
362         if (!desc)
363                 return -ENOMEM;
364         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
365         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
366         if (ret) {
367                 kfree(desc);
368                 return ret;
369         }
370
371         for (i = 0; i < desc_num; i++) {
372                 /* for special opcode 0034, only the first desc has the head */
373                 if (i == 0) {
374                         desc_data = (__le64 *)(&desc[i].data[0]);
375                         n = HCLGE_RD_FIRST_STATS_NUM;
376                 } else {
377                         desc_data = (__le64 *)(&desc[i]);
378                         n = HCLGE_RD_OTHER_STATS_NUM;
379                 }
380
381                 for (k = 0; k < n; k++) {
382                         *data += le64_to_cpu(*desc_data);
383                         data++;
384                         desc_data++;
385                 }
386         }
387
388         kfree(desc);
389
390         return 0;
391 }
392
393 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
394 {
395         struct hclge_desc desc;
396         __le32 *desc_data;
397         u32 reg_num;
398         int ret;
399
400         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
401         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
402         if (ret)
403                 return ret;
404
405         desc_data = (__le32 *)(&desc.data[0]);
406         reg_num = le32_to_cpu(*desc_data);
407
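        /* One descriptor carries the command head; the remaining
         * (reg_num - 3) 64-bit counters are packed four per descriptor,
         * so round the division up.
         */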
408         *desc_num = 1 + ((reg_num - 3) >> 2) +
409                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
410
411         return 0;
412 }
413
414 static int hclge_mac_update_stats(struct hclge_dev *hdev)
415 {
416         u32 desc_num;
417         int ret;
418
419         ret = hclge_mac_query_reg_num(hdev, &desc_num);
420
421         /* The firmware supports the new statistics acquisition method */
422         if (!ret)
423                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
424         else if (ret == -EOPNOTSUPP)
425                 ret = hclge_mac_update_stats_defective(hdev);
426         else
427                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
428
429         return ret;
430 }
431
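/* Read the per-queue RX and TX packet counters from firmware, one command
 * per queue, and accumulate them into each tqp's software tqp_stats.
 */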
432 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
433 {
434         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
435         struct hclge_vport *vport = hclge_get_vport(handle);
436         struct hclge_dev *hdev = vport->back;
437         struct hnae3_queue *queue;
438         struct hclge_desc desc[1];
439         struct hclge_tqp *tqp;
440         int ret, i;
441
442         for (i = 0; i < kinfo->num_tqps; i++) {
443                 queue = handle->kinfo.tqp[i];
444                 tqp = container_of(queue, struct hclge_tqp, q);
445                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
446                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
447                                            true);
448
449                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
450                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
451                 if (ret) {
452                         dev_err(&hdev->pdev->dev,
453                                 "Query tqp stat fail, status = %d,queue = %d\n",
454                                 ret, i);
455                         return ret;
456                 }
457                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
458                         le32_to_cpu(desc[0].data[1]);
459         }
460
461         for (i = 0; i < kinfo->num_tqps; i++) {
462                 queue = handle->kinfo.tqp[i];
463                 tqp = container_of(queue, struct hclge_tqp, q);
464                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
465                 hclge_cmd_setup_basic_desc(&desc[0],
466                                            HCLGE_OPC_QUERY_TX_STATUS,
467                                            true);
468
469                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
470                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
471                 if (ret) {
472                         dev_err(&hdev->pdev->dev,
473                                 "Query tqp stat fail, status = %d,queue = %d\n",
474                                 ret, i);
475                         return ret;
476                 }
477                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
478                         le32_to_cpu(desc[0].data[1]);
479         }
480
481         return 0;
482 }
483
484 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
485 {
486         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
487         struct hclge_tqp *tqp;
488         u64 *buff = data;
489         int i;
490
491         for (i = 0; i < kinfo->num_tqps; i++) {
492                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
494         }
495
496         for (i = 0; i < kinfo->num_tqps; i++) {
497                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
498                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
499         }
500
501         return buff;
502 }
503
504 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
505 {
506         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
507
508         /* each tqp has both a TX and an RX queue */
509         return kinfo->num_tqps * (2);
510 }
511
512 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
513 {
514         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
515         u8 *buff = data;
516         int i = 0;
517
518         for (i = 0; i < kinfo->num_tqps; i++) {
519                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
520                         struct hclge_tqp, q);
521                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
522                          tqp->index);
523                 buff = buff + ETH_GSTRING_LEN;
524         }
525
526         for (i = 0; i < kinfo->num_tqps; i++) {
527                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
528                         struct hclge_tqp, q);
529                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
530                          tqp->index);
531                 buff = buff + ETH_GSTRING_LEN;
532         }
533
534         return buff;
535 }
536
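/* Copy counters out of a statistics structure using the offset table in
 * strs[]; returns the position just past the last value written.
 */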
537 static u64 *hclge_comm_get_stats(void *comm_stats,
538                                  const struct hclge_comm_stats_str strs[],
539                                  int size, u64 *data)
540 {
541         u64 *buf = data;
542         u32 i;
543
544         for (i = 0; i < size; i++)
545                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
546
547         return buf + size;
548 }
549
550 static u8 *hclge_comm_get_strings(u32 stringset,
551                                   const struct hclge_comm_stats_str strs[],
552                                   int size, u8 *data)
553 {
554         char *buff = (char *)data;
555         u32 i;
556
557         if (stringset != ETH_SS_STATS)
558                 return buff;
559
560         for (i = 0; i < size; i++) {
561                 snprintf(buff, ETH_GSTRING_LEN, "%s",
562                          strs[i].desc);
563                 buff = buff + ETH_GSTRING_LEN;
564         }
565
566         return (u8 *)buff;
567 }
568
569 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
570 {
571         struct hnae3_handle *handle;
572         int status;
573
574         handle = &hdev->vport[0].nic;
575         if (handle->client) {
576                 status = hclge_tqps_update_stats(handle);
577                 if (status) {
578                         dev_err(&hdev->pdev->dev,
579                                 "Update TQPS stats fail, status = %d.\n",
580                                 status);
581                 }
582         }
583
584         status = hclge_mac_update_stats(hdev);
585         if (status)
586                 dev_err(&hdev->pdev->dev,
587                         "Update MAC stats fail, status = %d.\n", status);
588 }
589
590 static void hclge_update_stats(struct hnae3_handle *handle,
591                                struct net_device_stats *net_stats)
592 {
593         struct hclge_vport *vport = hclge_get_vport(handle);
594         struct hclge_dev *hdev = vport->back;
595         int status;
596
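        /* Serialize statistics refreshes: if another update is already in
         * flight, return and let that one finish.
         */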
597         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
598                 return;
599
600         status = hclge_mac_update_stats(hdev);
601         if (status)
602                 dev_err(&hdev->pdev->dev,
603                         "Update MAC stats fail, status = %d.\n",
604                         status);
605
606         status = hclge_tqps_update_stats(handle);
607         if (status)
608                 dev_err(&hdev->pdev->dev,
609                         "Update TQPS stats fail, status = %d.\n",
610                         status);
611
612         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
613 }
614
615 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
616 {
617 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
618                 HNAE3_SUPPORT_PHY_LOOPBACK |\
619                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
620                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
621
622         struct hclge_vport *vport = hclge_get_vport(handle);
623         struct hclge_dev *hdev = vport->back;
624         int count = 0;
625
626         /* Loopback test support rules:
627          * mac: only supported in GE mode
628          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
629          * phy: only supported when a phy device exists on the board
630          */
631         if (stringset == ETH_SS_TEST) {
632                 /* clear loopback bit flags at first */
633                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
634                 if (hdev->pdev->revision >= 0x21 ||
635                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
636                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
637                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
638                         count += 1;
639                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
640                 }
641
642                 count += 2;
643                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
644                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
645         } else if (stringset == ETH_SS_STATS) {
646                 count = ARRAY_SIZE(g_mac_stats_string) +
647                         hclge_tqps_get_sset_count(handle, stringset);
648         }
649
650         return count;
651 }
652
653 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
654                               u8 *data)
655 {
656         u8 *p = (char *)data;
657         int size;
658
659         if (stringset == ETH_SS_STATS) {
660                 size = ARRAY_SIZE(g_mac_stats_string);
661                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
662                                            size, p);
663                 p = hclge_tqps_get_strings(handle, p);
664         } else if (stringset == ETH_SS_TEST) {
665                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
666                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
672                                ETH_GSTRING_LEN);
673                         p += ETH_GSTRING_LEN;
674                 }
675                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
676                         memcpy(p,
677                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
678                                ETH_GSTRING_LEN);
679                         p += ETH_GSTRING_LEN;
680                 }
681                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
682                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686         }
687 }
688
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691         struct hclge_vport *vport = hclge_get_vport(handle);
692         struct hclge_dev *hdev = vport->back;
693         u64 *p;
694
695         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
696                                  ARRAY_SIZE(g_mac_stats_string), data);
697         p = hclge_tqps_get_stats(handle, p);
698 }
699
700 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
701                                      u64 *rx_cnt)
702 {
703         struct hclge_vport *vport = hclge_get_vport(handle);
704         struct hclge_dev *hdev = vport->back;
705
706         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
707         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
708 }
709
710 static int hclge_parse_func_status(struct hclge_dev *hdev,
711                                    struct hclge_func_status_cmd *status)
712 {
713         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
714                 return -EINVAL;
715
716         /* Set the pf to main pf */
717         if (status->pf_state & HCLGE_PF_STATE_MAIN)
718                 hdev->flag |= HCLGE_FLAG_MAIN;
719         else
720                 hdev->flag &= ~HCLGE_FLAG_MAIN;
721
722         return 0;
723 }
724
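/* Poll until firmware reports a non-zero pf_state (PF reset complete) or
 * HCLGE_QUERY_MAX_CNT attempts have been made, then parse the reported state.
 */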
725 static int hclge_query_function_status(struct hclge_dev *hdev)
726 {
727 #define HCLGE_QUERY_MAX_CNT     5
728
729         struct hclge_func_status_cmd *req;
730         struct hclge_desc desc;
731         int timeout = 0;
732         int ret;
733
734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735         req = (struct hclge_func_status_cmd *)desc.data;
736
737         do {
738                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739                 if (ret) {
740                         dev_err(&hdev->pdev->dev,
741                                 "query function status failed %d.\n", ret);
742                         return ret;
743                 }
744
745                 /* Check pf reset is done */
746                 if (req->pf_state)
747                         break;
748                 usleep_range(1000, 2000);
749         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
750
751         ret = hclge_parse_func_status(hdev, req);
752
753         return ret;
754 }
755
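/* Query the resources assigned to this PF: number of TQPs, packet/TX/DV
 * buffer sizes and the MSI-X vector budget (NIC vectors first, then RoCE
 * vectors when RoCE is supported).
 */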
756 static int hclge_query_pf_resource(struct hclge_dev *hdev)
757 {
758         struct hclge_pf_res_cmd *req;
759         struct hclge_desc desc;
760         int ret;
761
762         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
763         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
764         if (ret) {
765                 dev_err(&hdev->pdev->dev,
766                         "query pf resource failed %d.\n", ret);
767                 return ret;
768         }
769
770         req = (struct hclge_pf_res_cmd *)desc.data;
771         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
772         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
773
774         if (req->tx_buf_size)
775                 hdev->tx_buf_size =
776                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
777         else
778                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
779
780         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
781
782         if (req->dv_buf_size)
783                 hdev->dv_buf_size =
784                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
785         else
786                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
787
788         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
789
790         if (hnae3_dev_roce_supported(hdev)) {
791                 hdev->roce_base_msix_offset =
792                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
793                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
794                 hdev->num_roce_msi =
795                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
796                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
797
798                 /* PF should have NIC vectors and Roce vectors,
799                  * NIC vectors are queued before Roce vectors.
800                  */
801                 hdev->num_msi = hdev->num_roce_msi +
802                                 hdev->roce_base_msix_offset;
803         } else {
804                 hdev->num_msi =
805                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
806                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
807         }
808
809         return 0;
810 }
811
812 static int hclge_parse_speed(int speed_cmd, int *speed)
813 {
814         switch (speed_cmd) {
815         case 6:
816                 *speed = HCLGE_MAC_SPEED_10M;
817                 break;
818         case 7:
819                 *speed = HCLGE_MAC_SPEED_100M;
820                 break;
821         case 0:
822                 *speed = HCLGE_MAC_SPEED_1G;
823                 break;
824         case 1:
825                 *speed = HCLGE_MAC_SPEED_10G;
826                 break;
827         case 2:
828                 *speed = HCLGE_MAC_SPEED_25G;
829                 break;
830         case 3:
831                 *speed = HCLGE_MAC_SPEED_40G;
832                 break;
833         case 4:
834                 *speed = HCLGE_MAC_SPEED_50G;
835                 break;
836         case 5:
837                 *speed = HCLGE_MAC_SPEED_100G;
838                 break;
839         default:
840                 return -EINVAL;
841         }
842
843         return 0;
844 }
845
846 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
847 {
848         struct hclge_vport *vport = hclge_get_vport(handle);
849         struct hclge_dev *hdev = vport->back;
850         u32 speed_ability = hdev->hw.mac.speed_ability;
851         u32 speed_bit = 0;
852
853         switch (speed) {
854         case HCLGE_MAC_SPEED_10M:
855                 speed_bit = HCLGE_SUPPORT_10M_BIT;
856                 break;
857         case HCLGE_MAC_SPEED_100M:
858                 speed_bit = HCLGE_SUPPORT_100M_BIT;
859                 break;
860         case HCLGE_MAC_SPEED_1G:
861                 speed_bit = HCLGE_SUPPORT_1G_BIT;
862                 break;
863         case HCLGE_MAC_SPEED_10G:
864                 speed_bit = HCLGE_SUPPORT_10G_BIT;
865                 break;
866         case HCLGE_MAC_SPEED_25G:
867                 speed_bit = HCLGE_SUPPORT_25G_BIT;
868                 break;
869         case HCLGE_MAC_SPEED_40G:
870                 speed_bit = HCLGE_SUPPORT_40G_BIT;
871                 break;
872         case HCLGE_MAC_SPEED_50G:
873                 speed_bit = HCLGE_SUPPORT_50G_BIT;
874                 break;
875         case HCLGE_MAC_SPEED_100G:
876                 speed_bit = HCLGE_SUPPORT_100G_BIT;
877                 break;
878         default:
879                 return -EINVAL;
880         }
881
882         if (speed_bit & speed_ability)
883                 return 0;
884
885         return -EINVAL;
886 }
887
888 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
889 {
890         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
891                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
892                                  mac->supported);
893         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
894                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
895                                  mac->supported);
896         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
897                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
898                                  mac->supported);
899         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
900                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
901                                  mac->supported);
902         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
903                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
904                                  mac->supported);
905 }
906
907 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
908 {
909         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
910                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
911                                  mac->supported);
912         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
913                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
914                                  mac->supported);
915         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
916                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
917                                  mac->supported);
918         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
919                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
920                                  mac->supported);
921         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
922                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
923                                  mac->supported);
924 }
925
926 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
927 {
928         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
929                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
930                                  mac->supported);
931         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
932                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
933                                  mac->supported);
934         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
935                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
936                                  mac->supported);
937         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
938                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
939                                  mac->supported);
940         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
941                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
942                                  mac->supported);
943 }
944
945 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
946 {
947         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
948                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
949                                  mac->supported);
950         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
951                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
952                                  mac->supported);
953         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
954                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
955                                  mac->supported);
956         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
957                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
958                                  mac->supported);
959         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
960                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
961                                  mac->supported);
962         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
963                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
964                                  mac->supported);
965 }
966
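/* Set the FEC link modes supported at the current MAC speed: BaseR for
 * 10G/40G, RS for 25G/50G/100G, and record the matching fec_ability mask.
 */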
967 static void hclge_convert_setting_fec(struct hclge_mac *mac)
968 {
969         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
970         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
971
972         switch (mac->speed) {
973         case HCLGE_MAC_SPEED_10G:
974         case HCLGE_MAC_SPEED_40G:
975                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
976                                  mac->supported);
977                 mac->fec_ability =
978                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
979                 break;
980         case HCLGE_MAC_SPEED_25G:
981         case HCLGE_MAC_SPEED_50G:
982                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
983                                  mac->supported);
984                 mac->fec_ability =
985                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
986                         BIT(HNAE3_FEC_AUTO);
987                 break;
988         case HCLGE_MAC_SPEED_100G:
989                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
990                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
991                 break;
992         default:
993                 mac->fec_ability = 0;
994                 break;
995         }
996 }
997
998 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
999                                         u8 speed_ability)
1000 {
1001         struct hclge_mac *mac = &hdev->hw.mac;
1002
1003         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1004                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1005                                  mac->supported);
1006
1007         hclge_convert_setting_sr(mac, speed_ability);
1008         hclge_convert_setting_lr(mac, speed_ability);
1009         hclge_convert_setting_cr(mac, speed_ability);
1010         if (hdev->pdev->revision >= 0x21)
1011                 hclge_convert_setting_fec(mac);
1012
1013         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1014         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1015         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1016 }
1017
1018 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1019                                             u8 speed_ability)
1020 {
1021         struct hclge_mac *mac = &hdev->hw.mac;
1022
1023         hclge_convert_setting_kr(mac, speed_ability);
1024         if (hdev->pdev->revision >= 0x21)
1025                 hclge_convert_setting_fec(mac);
1026         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1027         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1028         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1029 }
1030
1031 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1032                                          u8 speed_ability)
1033 {
1034         unsigned long *supported = hdev->hw.mac.supported;
1035
1036         /* default to supporting all speeds for a GE port */
1037         if (!speed_ability)
1038                 speed_ability = HCLGE_SUPPORT_GE;
1039
1040         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1041                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1042                                  supported);
1043
1044         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1046                                  supported);
1047                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1048                                  supported);
1049         }
1050
1051         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1054         }
1055
1056         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1057         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1058         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1059 }
1060
1061 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1062 {
1063         u8 media_type = hdev->hw.mac.media_type;
1064
1065         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1066                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1067         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1068                 hclge_parse_copper_link_mode(hdev, speed_ability);
1069         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1070                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1071 }

1072 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1073 {
1074         struct hclge_cfg_param_cmd *req;
1075         u64 mac_addr_tmp_high;
1076         u64 mac_addr_tmp;
1077         int i;
1078
1079         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1080
1081         /* get the configuration */
1082         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1083                                               HCLGE_CFG_VMDQ_M,
1084                                               HCLGE_CFG_VMDQ_S);
1085         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1086                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1087         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1088                                             HCLGE_CFG_TQP_DESC_N_M,
1089                                             HCLGE_CFG_TQP_DESC_N_S);
1090
1091         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1092                                         HCLGE_CFG_PHY_ADDR_M,
1093                                         HCLGE_CFG_PHY_ADDR_S);
1094         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1095                                           HCLGE_CFG_MEDIA_TP_M,
1096                                           HCLGE_CFG_MEDIA_TP_S);
1097         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1098                                           HCLGE_CFG_RX_BUF_LEN_M,
1099                                           HCLGE_CFG_RX_BUF_LEN_S);
1100         /* get mac_address */
1101         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1102         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1103                                             HCLGE_CFG_MAC_ADDR_H_M,
1104                                             HCLGE_CFG_MAC_ADDR_H_S);
1105
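        /* param[2] holds the low 32 bits of the MAC address and param[3]
         * holds the high bits; shift the high part up past the low 32 bits
         * (done as two shifts of 31 and 1).
         */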
1106         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1107
1108         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1109                                              HCLGE_CFG_DEFAULT_SPEED_M,
1110                                              HCLGE_CFG_DEFAULT_SPEED_S);
1111         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1112                                             HCLGE_CFG_RSS_SIZE_M,
1113                                             HCLGE_CFG_RSS_SIZE_S);
1114
1115         for (i = 0; i < ETH_ALEN; i++)
1116                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1117
1118         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1119         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1120
1121         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1122                                              HCLGE_CFG_SPEED_ABILITY_M,
1123                                              HCLGE_CFG_SPEED_ABILITY_S);
1124         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1125                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1126                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1127         if (!cfg->umv_space)
1128                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1129 }
1130
1131 /* hclge_get_cfg: query the static parameters from flash
1132  * @hdev: pointer to struct hclge_dev
1133  * @hcfg: the config structure to be filled in
1134  */
1135 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1136 {
1137         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1138         struct hclge_cfg_param_cmd *req;
1139         int i, ret;
1140
1141         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1142                 u32 offset = 0;
1143
1144                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1145                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1146                                            true);
1147                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1148                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1149                 /* the length sent to hardware is in units of 4 bytes */
1150                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1151                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1152                 req->offset = cpu_to_le32(offset);
1153         }
1154
1155         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1156         if (ret) {
1157                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1158                 return ret;
1159         }
1160
1161         hclge_parse_cfg(hcfg, desc);
1162
1163         return 0;
1164 }
1165
1166 static int hclge_get_cap(struct hclge_dev *hdev)
1167 {
1168         int ret;
1169
1170         ret = hclge_query_function_status(hdev);
1171         if (ret) {
1172                 dev_err(&hdev->pdev->dev,
1173                         "query function status error %d.\n", ret);
1174                 return ret;
1175         }
1176
1177         /* get pf resource */
1178         ret = hclge_query_pf_resource(hdev);
1179         if (ret)
1180                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1181
1182         return ret;
1183 }
1184
1185 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1186 {
1187 #define HCLGE_MIN_TX_DESC       64
1188 #define HCLGE_MIN_RX_DESC       64
1189
1190         if (!is_kdump_kernel())
1191                 return;
1192
1193         dev_info(&hdev->pdev->dev,
1194                  "Running kdump kernel. Using minimal resources\n");
1195
1196         /* minimal number of queue pairs equals the number of vports */
1197         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1198         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1199         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1200 }
1201
1202 static int hclge_configure(struct hclge_dev *hdev)
1203 {
1204         struct hclge_cfg cfg;
1205         int ret, i;
1206
1207         ret = hclge_get_cfg(hdev, &cfg);
1208         if (ret) {
1209                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1210                 return ret;
1211         }
1212
1213         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1214         hdev->base_tqp_pid = 0;
1215         hdev->rss_size_max = cfg.rss_size_max;
1216         hdev->rx_buf_len = cfg.rx_buf_len;
1217         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1218         hdev->hw.mac.media_type = cfg.media_type;
1219         hdev->hw.mac.phy_addr = cfg.phy_addr;
1220         hdev->num_tx_desc = cfg.tqp_desc_num;
1221         hdev->num_rx_desc = cfg.tqp_desc_num;
1222         hdev->tm_info.num_pg = 1;
1223         hdev->tc_max = cfg.tc_num;
1224         hdev->tm_info.hw_pfc_map = 0;
1225         hdev->wanted_umv_size = cfg.umv_space;
1226
1227         if (hnae3_dev_fd_supported(hdev)) {
1228                 hdev->fd_en = true;
1229                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1230         }
1231
1232         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1233         if (ret) {
1234                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1235                 return ret;
1236         }
1237
1238         hclge_parse_link_mode(hdev, cfg.speed_ability);
1239
1240         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1241             (hdev->tc_max < 1)) {
1242                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1243                          hdev->tc_max);
1244                 hdev->tc_max = 1;
1245         }
1246
1247         /* Dev does not support DCB */
1248         if (!hnae3_dev_dcb_supported(hdev)) {
1249                 hdev->tc_max = 1;
1250                 hdev->pfc_max = 0;
1251         } else {
1252                 hdev->pfc_max = hdev->tc_max;
1253         }
1254
1255         hdev->tm_info.num_tc = 1;
1256
1257         /* Currently does not support non-contiguous tc */
1258         for (i = 0; i < hdev->tm_info.num_tc; i++)
1259                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1260
1261         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1262
1263         hclge_init_kdump_kernel_config(hdev);
1264
1265         return ret;
1266 }
1267
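/* Program the hardware TSO MSS limits; each limit is packed into its own
 * 16-bit field of the TSO config command.
 */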
1268 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1269                             int tso_mss_max)
1270 {
1271         struct hclge_cfg_tso_status_cmd *req;
1272         struct hclge_desc desc;
1273         u16 tso_mss;
1274
1275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1276
1277         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1278
1279         tso_mss = 0;
1280         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1281                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1282         req->tso_mss_min = cpu_to_le16(tso_mss);
1283
1284         tso_mss = 0;
1285         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1286                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1287         req->tso_mss_max = cpu_to_le16(tso_mss);
1288
1289         return hclge_cmd_send(&hdev->hw, &desc, 1);
1290 }
1291
1292 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1293 {
1294         struct hclge_cfg_gro_status_cmd *req;
1295         struct hclge_desc desc;
1296         int ret;
1297
1298         if (!hnae3_dev_gro_supported(hdev))
1299                 return 0;
1300
1301         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1302         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1303
1304         req->gro_en = cpu_to_le16(en ? 1 : 0);
1305
1306         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1307         if (ret)
1308                 dev_err(&hdev->pdev->dev,
1309                         "GRO hardware config cmd failed, ret = %d\n", ret);
1310
1311         return ret;
1312 }
1313
1314 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1315 {
1316         struct hclge_tqp *tqp;
1317         int i;
1318
1319         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1320                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1321         if (!hdev->htqp)
1322                 return -ENOMEM;
1323
1324         tqp = hdev->htqp;
1325
1326         for (i = 0; i < hdev->num_tqps; i++) {
1327                 tqp->dev = &hdev->pdev->dev;
1328                 tqp->index = i;
1329
1330                 tqp->q.ae_algo = &ae_algo;
1331                 tqp->q.buf_size = hdev->rx_buf_len;
1332                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1333                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1334                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1335                         i * HCLGE_TQP_REG_SIZE;
1336
1337                 tqp++;
1338         }
1339
1340         return 0;
1341 }
1342
1343 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1344                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1345 {
1346         struct hclge_tqp_map_cmd *req;
1347         struct hclge_desc desc;
1348         int ret;
1349
1350         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1351
1352         req = (struct hclge_tqp_map_cmd *)desc.data;
1353         req->tqp_id = cpu_to_le16(tqp_pid);
1354         req->tqp_vf = func_id;
1355         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1356                         1 << HCLGE_TQP_MAP_EN_B;
1357         req->tqp_vid = cpu_to_le16(tqp_vid);
1358
1359         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1360         if (ret)
1361                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1362
1363         return ret;
1364 }
1365
1366 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1367 {
1368         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1369         struct hclge_dev *hdev = vport->back;
1370         int i, alloced;
1371
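        /* walk the global TQP pool and hand the first num_tqps unused
         * queues to this vport's nic handle
         */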
1372         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1373              alloced < num_tqps; i++) {
1374                 if (!hdev->htqp[i].alloced) {
1375                         hdev->htqp[i].q.handle = &vport->nic;
1376                         hdev->htqp[i].q.tqp_index = alloced;
1377                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1378                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1379                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1380                         hdev->htqp[i].alloced = true;
1381                         alloced++;
1382                 }
1383         }
1384         vport->alloc_tqps = alloced;
1385         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1386                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1387
1388         return 0;
1389 }
1390
1391 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1392                             u16 num_tx_desc, u16 num_rx_desc)
1394 {
1395         struct hnae3_handle *nic = &vport->nic;
1396         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1397         struct hclge_dev *hdev = vport->back;
1398         int ret;
1399
1400         kinfo->num_tx_desc = num_tx_desc;
1401         kinfo->num_rx_desc = num_rx_desc;
1402
1403         kinfo->rx_buf_len = hdev->rx_buf_len;
1404
1405         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1406                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1407         if (!kinfo->tqp)
1408                 return -ENOMEM;
1409
1410         ret = hclge_assign_tqp(vport, num_tqps);
1411         if (ret)
1412                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1413
1414         return ret;
1415 }
1416
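/* bind every queue already assigned to this vport to its function id in
 * hardware; vport 0 is the PF itself, the other vports are VFs
 */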
1417 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1418                                   struct hclge_vport *vport)
1419 {
1420         struct hnae3_handle *nic = &vport->nic;
1421         struct hnae3_knic_private_info *kinfo;
1422         u16 i;
1423
1424         kinfo = &nic->kinfo;
1425         for (i = 0; i < vport->alloc_tqps; i++) {
1426                 struct hclge_tqp *q =
1427                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1428                 bool is_pf;
1429                 int ret;
1430
1431                 is_pf = !(vport->vport_id);
1432                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1433                                              i, is_pf);
1434                 if (ret)
1435                         return ret;
1436         }
1437
1438         return 0;
1439 }
1440
1441 static int hclge_map_tqp(struct hclge_dev *hdev)
1442 {
1443         struct hclge_vport *vport = hdev->vport;
1444         u16 i, num_vport;
1445
1446         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1447         for (i = 0; i < num_vport; i++) {
1448                 int ret;
1449
1450                 ret = hclge_map_tqp_to_vport(hdev, vport);
1451                 if (ret)
1452                         return ret;
1453
1454                 vport++;
1455         }
1456
1457         return 0;
1458 }
1459
1460 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1461 {
1462         struct hnae3_handle *nic = &vport->nic;
1463         struct hclge_dev *hdev = vport->back;
1464         int ret;
1465
1466         nic->pdev = hdev->pdev;
1467         nic->ae_algo = &ae_algo;
1468         nic->numa_node_mask = hdev->numa_node_mask;
1469
1470         ret = hclge_knic_setup(vport, num_tqps,
1471                                hdev->num_tx_desc, hdev->num_rx_desc);
1472         if (ret)
1473                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1474
1475         return ret;
1476 }
1477
1478 static int hclge_alloc_vport(struct hclge_dev *hdev)
1479 {
1480         struct pci_dev *pdev = hdev->pdev;
1481         struct hclge_vport *vport;
1482         u32 tqp_main_vport;
1483         u32 tqp_per_vport;
1484         int num_vport, i;
1485         int ret;
1486
1487         /* We need to alloc a vport for the main NIC of the PF */
1488         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1489
1490         if (hdev->num_tqps < num_vport) {
1491                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1492                         hdev->num_tqps, num_vport);
1493                 return -EINVAL;
1494         }
1495
1496         /* Alloc the same number of TQPs for every vport */
1497         tqp_per_vport = hdev->num_tqps / num_vport;
1498         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1499
1500         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1501                              GFP_KERNEL);
1502         if (!vport)
1503                 return -ENOMEM;
1504
1505         hdev->vport = vport;
1506         hdev->num_alloc_vport = num_vport;
1507
1508         if (IS_ENABLED(CONFIG_PCI_IOV))
1509                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1510
1511         for (i = 0; i < num_vport; i++) {
1512                 vport->back = hdev;
1513                 vport->vport_id = i;
1514                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1515                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1516                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1517                 INIT_LIST_HEAD(&vport->vlan_list);
1518                 INIT_LIST_HEAD(&vport->uc_mac_list);
1519                 INIT_LIST_HEAD(&vport->mc_mac_list);
1520
1521                 if (i == 0)
1522                         ret = hclge_vport_setup(vport, tqp_main_vport);
1523                 else
1524                         ret = hclge_vport_setup(vport, tqp_per_vport);
1525                 if (ret) {
1526                         dev_err(&pdev->dev,
1527                                 "vport setup failed for vport %d, %d\n",
1528                                 i, ret);
1529                         return ret;
1530                 }
1531
1532                 vport++;
1533         }
1534
1535         return 0;
1536 }
1537
1538 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1539                                     struct hclge_pkt_buf_alloc *buf_alloc)
1540 {
1541 /* TX buffer size is allocated in units of 128 bytes */
1542 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1543 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1544         struct hclge_tx_buff_alloc_cmd *req;
1545         struct hclge_desc desc;
1546         int ret;
1547         u8 i;
1548
1549         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1550
1551         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1552         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1553                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1554
1555                 req->tx_pkt_buff[i] =
1556                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1557                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1558         }
1559
1560         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1561         if (ret)
1562                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1563                         ret);
1564
1565         return ret;
1566 }
1567
1568 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1569                                  struct hclge_pkt_buf_alloc *buf_alloc)
1570 {
1571         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1572
1573         if (ret)
1574                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1575
1576         return ret;
1577 }
1578
1579 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1580 {
1581         int i, cnt = 0;
1582
1583         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1584                 if (hdev->hw_tc_map & BIT(i))
1585                         cnt++;
1586         return cnt;
1587 }
1588
1589 /* Get the number of pfc enabled TCs, which have private buffer */
1590 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1591                                   struct hclge_pkt_buf_alloc *buf_alloc)
1592 {
1593         struct hclge_priv_buf *priv;
1594         int i, cnt = 0;
1595
1596         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1597                 priv = &buf_alloc->priv_buf[i];
1598                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1599                     priv->enable)
1600                         cnt++;
1601         }
1602
1603         return cnt;
1604 }
1605
1606 /* Get the number of pfc disabled TCs, which have private buffer */
1607 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1608                                      struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if (hdev->hw_tc_map & BIT(i) &&
1616                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1617                     priv->enable)
1618                         cnt++;
1619         }
1620
1621         return cnt;
1622 }
1623
1624 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1625 {
1626         struct hclge_priv_buf *priv;
1627         u32 rx_priv = 0;
1628         int i;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (priv->enable)
1633                         rx_priv += priv->buf_size;
1634         }
1635         return rx_priv;
1636 }
1637
1638 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1639 {
1640         u32 i, total_tx_size = 0;
1641
1642         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1643                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1644
1645         return total_tx_size;
1646 }
1647
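/* check whether the rx buffer left after the private allocation is big
 * enough for the shared buffer; if it is, fill in the shared buffer size
 * and the per-TC high/low thresholds
 */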
1648 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1649                                 struct hclge_pkt_buf_alloc *buf_alloc,
1650                                 u32 rx_all)
1651 {
1652         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1653         u32 tc_num = hclge_get_tc_num(hdev);
1654         u32 shared_buf, aligned_mps;
1655         u32 rx_priv;
1656         int i;
1657
1658         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1659
1660         if (hnae3_dev_dcb_supported(hdev))
1661                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1662                                         hdev->dv_buf_size;
1663         else
1664                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1665                                         + hdev->dv_buf_size;
1666
1667         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1668         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1669                              HCLGE_BUF_SIZE_UNIT);
1670
1671         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1672         if (rx_all < rx_priv + shared_std)
1673                 return false;
1674
1675         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1676         buf_alloc->s_buf.buf_size = shared_buf;
1677         if (hnae3_dev_dcb_supported(hdev)) {
1678                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1679                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1680                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1681                                   HCLGE_BUF_SIZE_UNIT);
1682         } else {
1683                 buf_alloc->s_buf.self.high = aligned_mps +
1684                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1685                 buf_alloc->s_buf.self.low = aligned_mps;
1686         }
1687
1688         if (hnae3_dev_dcb_supported(hdev)) {
1689                 if (tc_num)
1690                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1691                 else
1692                         hi_thrd = shared_buf - hdev->dv_buf_size;
1693
1694                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1695                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1696                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1697         } else {
1698                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1699                 lo_thrd = aligned_mps;
1700         }
1701
1702         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1703                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1704                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1705         }
1706
1707         return true;
1708 }
1709
1710 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1711                                 struct hclge_pkt_buf_alloc *buf_alloc)
1712 {
1713         u32 i, total_size;
1714
1715         total_size = hdev->pkt_buf_size;
1716
1717         /* alloc tx buffer for all enabled tc */
1718         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1719                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1720
1721                 if (hdev->hw_tc_map & BIT(i)) {
1722                         if (total_size < hdev->tx_buf_size)
1723                                 return -ENOMEM;
1724
1725                         priv->tx_buf_size = hdev->tx_buf_size;
1726                 } else {
1727                         priv->tx_buf_size = 0;
1728                 }
1729
1730                 total_size -= priv->tx_buf_size;
1731         }
1732
1733         return 0;
1734 }
1735
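/* assign private rx buffer and waterlines for every enabled TC, using
 * larger waterlines when @max is true, then check whether the result
 * still fits into the total rx packet buffer
 */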
1736 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1737                                   struct hclge_pkt_buf_alloc *buf_alloc)
1738 {
1739         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1740         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1741         int i;
1742
1743         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1744                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1745
1746                 priv->enable = 0;
1747                 priv->wl.low = 0;
1748                 priv->wl.high = 0;
1749                 priv->buf_size = 0;
1750
1751                 if (!(hdev->hw_tc_map & BIT(i)))
1752                         continue;
1753
1754                 priv->enable = 1;
1755
1756                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1757                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1758                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1759                                                 HCLGE_BUF_SIZE_UNIT);
1760                 } else {
1761                         priv->wl.low = 0;
1762                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1763                                         aligned_mps;
1764                 }
1765
1766                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1767         }
1768
1769         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1770 }
1771
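/* drop the private buffer of non-PFC TCs, starting from the last TC,
 * until the remaining rx buffer fits
 */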
1772 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1773                                           struct hclge_pkt_buf_alloc *buf_alloc)
1774 {
1775         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1776         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1777         int i;
1778
1779         /* clear the private buffers starting from the last TC */
1780         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1781                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1782
1783                 if (hdev->hw_tc_map & BIT(i) &&
1784                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1785                         /* clear the private buffer of this non-PFC TC */
1786                         priv->wl.low = 0;
1787                         priv->wl.high = 0;
1788                         priv->buf_size = 0;
1789                         priv->enable = 0;
1790                         no_pfc_priv_num--;
1791                 }
1792
1793                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1794                     no_pfc_priv_num == 0)
1795                         break;
1796         }
1797
1798         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1799 }
1800
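/* drop the private buffer of PFC TCs, starting from the last TC, until
 * the remaining rx buffer fits
 */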
1801 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1802                                         struct hclge_pkt_buf_alloc *buf_alloc)
1803 {
1804         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1805         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1806         int i;
1807
1808         /* clear the private buffers starting from the last TC */
1809         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1810                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1811
1812                 if (hdev->hw_tc_map & BIT(i) &&
1813                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1814                         /* Reduce the number of PFC TCs with private buffer */
1815                         priv->wl.low = 0;
1816                         priv->enable = 0;
1817                         priv->wl.high = 0;
1818                         priv->buf_size = 0;
1819                         pfc_priv_num--;
1820                 }
1821
1822                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1823                     pfc_priv_num == 0)
1824                         break;
1825         }
1826
1827         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1828 }
1829
1830 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1831  * @hdev: pointer to struct hclge_dev
1832  * @buf_alloc: pointer to buffer calculation data
1833  * @return: 0: calculate successful, negative: fail
1834  */
1835 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1836                                 struct hclge_pkt_buf_alloc *buf_alloc)
1837 {
1838         /* When DCB is not supported, rx private buffer is not allocated. */
1839         if (!hnae3_dev_dcb_supported(hdev)) {
1840                 u32 rx_all = hdev->pkt_buf_size;
1841
1842                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1843                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1844                         return -ENOMEM;
1845
1846                 return 0;
1847         }
1848
1849         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1850                 return 0;
1851
1852         /* try to decrease the buffer size */
1853         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1854                 return 0;
1855
1856         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1857                 return 0;
1858
1859         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1860                 return 0;
1861
1862         return -ENOMEM;
1863 }
1864
1865 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1866                                    struct hclge_pkt_buf_alloc *buf_alloc)
1867 {
1868         struct hclge_rx_priv_buff_cmd *req;
1869         struct hclge_desc desc;
1870         int ret;
1871         int i;
1872
1873         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1874         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1875
1876         /* Alloc private buffer TCs */
1877         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1878                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1879
1880                 req->buf_num[i] =
1881                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1882                 req->buf_num[i] |=
1883                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1884         }
1885
1886         req->shared_buf =
1887                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1888                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1889
1890         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1891         if (ret)
1892                 dev_err(&hdev->pdev->dev,
1893                         "rx private buffer alloc cmd failed %d\n", ret);
1894
1895         return ret;
1896 }
1897
1898 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1899                                    struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901         struct hclge_rx_priv_wl_buf *req;
1902         struct hclge_priv_buf *priv;
1903         struct hclge_desc desc[2];
1904         int i, j;
1905         int ret;
1906
1907         for (i = 0; i < 2; i++) {
1908                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1909                                            false);
1910                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1911
1912                 /* The first descriptor set the NEXT bit to 1 */
1913                 if (i == 0)
1914                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1915                 else
1916                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1917
1918                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1919                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1920
1921                         priv = &buf_alloc->priv_buf[idx];
1922                         req->tc_wl[j].high =
1923                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1924                         req->tc_wl[j].high |=
1925                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1926                         req->tc_wl[j].low =
1927                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1928                         req->tc_wl[j].low |=
1929                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1930                 }
1931         }
1932
1933         /* Send 2 descriptors at one time */
1934         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1935         if (ret)
1936                 dev_err(&hdev->pdev->dev,
1937                         "rx private waterline config cmd failed %d\n",
1938                         ret);
1939         return ret;
1940 }
1941
1942 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1943                                     struct hclge_pkt_buf_alloc *buf_alloc)
1944 {
1945         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1946         struct hclge_rx_com_thrd *req;
1947         struct hclge_desc desc[2];
1948         struct hclge_tc_thrd *tc;
1949         int i, j;
1950         int ret;
1951
1952         for (i = 0; i < 2; i++) {
1953                 hclge_cmd_setup_basic_desc(&desc[i],
1954                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1955                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1956
1957                 /* The first descriptor set the NEXT bit to 1 */
1958                 if (i == 0)
1959                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1960                 else
1961                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1962
1963                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1964                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1965
1966                         req->com_thrd[j].high =
1967                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1968                         req->com_thrd[j].high |=
1969                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1970                         req->com_thrd[j].low =
1971                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1972                         req->com_thrd[j].low |=
1973                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1974                 }
1975         }
1976
1977         /* Send 2 descriptors at one time */
1978         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1979         if (ret)
1980                 dev_err(&hdev->pdev->dev,
1981                         "common threshold config cmd failed %d\n", ret);
1982         return ret;
1983 }
1984
1985 static int hclge_common_wl_config(struct hclge_dev *hdev,
1986                                   struct hclge_pkt_buf_alloc *buf_alloc)
1987 {
1988         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1989         struct hclge_rx_com_wl *req;
1990         struct hclge_desc desc;
1991         int ret;
1992
1993         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1994
1995         req = (struct hclge_rx_com_wl *)desc.data;
1996         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1997         req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1998
1999         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2000         req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2001
2002         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2003         if (ret)
2004                 dev_err(&hdev->pdev->dev,
2005                         "common waterline config cmd failed %d\n", ret);
2006
2007         return ret;
2008 }
2009
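/* calculate and program the whole packet buffer layout: tx buffer per TC,
 * rx private buffer per TC, and (for DCB capable devices) the private
 * waterlines and common thresholds, followed by the common waterline
 */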
2010 int hclge_buffer_alloc(struct hclge_dev *hdev)
2011 {
2012         struct hclge_pkt_buf_alloc *pkt_buf;
2013         int ret;
2014
2015         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2016         if (!pkt_buf)
2017                 return -ENOMEM;
2018
2019         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2020         if (ret) {
2021                 dev_err(&hdev->pdev->dev,
2022                         "could not calc tx buffer size for all TCs %d\n", ret);
2023                 goto out;
2024         }
2025
2026         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2027         if (ret) {
2028                 dev_err(&hdev->pdev->dev,
2029                         "could not alloc tx buffers %d\n", ret);
2030                 goto out;
2031         }
2032
2033         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc rx priv buffer size for all TCs %d\n",
2037                         ret);
2038                 goto out;
2039         }
2040
2041         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2042         if (ret) {
2043                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2044                         ret);
2045                 goto out;
2046         }
2047
2048         if (hnae3_dev_dcb_supported(hdev)) {
2049                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2050                 if (ret) {
2051                         dev_err(&hdev->pdev->dev,
2052                                 "could not configure rx private waterline %d\n",
2053                                 ret);
2054                         goto out;
2055                 }
2056
2057                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2058                 if (ret) {
2059                         dev_err(&hdev->pdev->dev,
2060                                 "could not configure common threshold %d\n",
2061                                 ret);
2062                         goto out;
2063                 }
2064         }
2065
2066         ret = hclge_common_wl_config(hdev, pkt_buf);
2067         if (ret)
2068                 dev_err(&hdev->pdev->dev,
2069                         "could not configure common waterline %d\n", ret);
2070
2071 out:
2072         kfree(pkt_buf);
2073         return ret;
2074 }
2075
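/* populate the vport's roce handle with the vector range, netdev and io
 * base information it shares with the nic handle
 */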
2076 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2077 {
2078         struct hnae3_handle *roce = &vport->roce;
2079         struct hnae3_handle *nic = &vport->nic;
2080
2081         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2082
2083         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2084             vport->back->num_msi_left == 0)
2085                 return -EINVAL;
2086
2087         roce->rinfo.base_vector = vport->back->roce_base_vector;
2088
2089         roce->rinfo.netdev = nic->kinfo.netdev;
2090         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2091
2092         roce->pdev = nic->pdev;
2093         roce->ae_algo = nic->ae_algo;
2094         roce->numa_node_mask = nic->numa_node_mask;
2095
2096         return 0;
2097 }
2098
2099 static int hclge_init_msi(struct hclge_dev *hdev)
2100 {
2101         struct pci_dev *pdev = hdev->pdev;
2102         int vectors;
2103         int i;
2104
2105         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2106                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2107         if (vectors < 0) {
2108                 dev_err(&pdev->dev,
2109                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2110                         vectors);
2111                 return vectors;
2112         }
2113         if (vectors < hdev->num_msi)
2114                 dev_warn(&hdev->pdev->dev,
2115                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2116                          hdev->num_msi, vectors);
2117
2118         hdev->num_msi = vectors;
2119         hdev->num_msi_left = vectors;
2120         hdev->base_msi_vector = pdev->irq;
2121         hdev->roce_base_vector = hdev->base_msi_vector +
2122                                 hdev->roce_base_msix_offset;
2123
2124         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2125                                            sizeof(u16), GFP_KERNEL);
2126         if (!hdev->vector_status) {
2127                 pci_free_irq_vectors(pdev);
2128                 return -ENOMEM;
2129         }
2130
2131         for (i = 0; i < hdev->num_msi; i++)
2132                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2133
2134         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2135                                         sizeof(int), GFP_KERNEL);
2136         if (!hdev->vector_irq) {
2137                 pci_free_irq_vectors(pdev);
2138                 return -ENOMEM;
2139         }
2140
2141         return 0;
2142 }
2143
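/* only 10M and 100M support half duplex; force full duplex for any other
 * speed
 */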
2144 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2145 {
2146         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2147                 duplex = HCLGE_MAC_FULL;
2148
2149         return duplex;
2150 }
2151
2152 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2153                                       u8 duplex)
2154 {
2155         struct hclge_config_mac_speed_dup_cmd *req;
2156         struct hclge_desc desc;
2157         int ret;
2158
2159         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2160
2161         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2162
2163         if (duplex)
2164                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2165
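        /* translate the MAC speed into the firmware speed-select encoding */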
2166         switch (speed) {
2167         case HCLGE_MAC_SPEED_10M:
2168                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2169                                 HCLGE_CFG_SPEED_S, 6);
2170                 break;
2171         case HCLGE_MAC_SPEED_100M:
2172                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2173                                 HCLGE_CFG_SPEED_S, 7);
2174                 break;
2175         case HCLGE_MAC_SPEED_1G:
2176                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2177                                 HCLGE_CFG_SPEED_S, 0);
2178                 break;
2179         case HCLGE_MAC_SPEED_10G:
2180                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2181                                 HCLGE_CFG_SPEED_S, 1);
2182                 break;
2183         case HCLGE_MAC_SPEED_25G:
2184                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2185                                 HCLGE_CFG_SPEED_S, 2);
2186                 break;
2187         case HCLGE_MAC_SPEED_40G:
2188                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2189                                 HCLGE_CFG_SPEED_S, 3);
2190                 break;
2191         case HCLGE_MAC_SPEED_50G:
2192                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2193                                 HCLGE_CFG_SPEED_S, 4);
2194                 break;
2195         case HCLGE_MAC_SPEED_100G:
2196                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2197                                 HCLGE_CFG_SPEED_S, 5);
2198                 break;
2199         default:
2200                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2201                 return -EINVAL;
2202         }
2203
2204         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2205                       1);
2206
2207         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2208         if (ret) {
2209                 dev_err(&hdev->pdev->dev,
2210                         "mac speed/duplex config cmd failed %d.\n", ret);
2211                 return ret;
2212         }
2213
2214         return 0;
2215 }
2216
2217 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2218 {
2219         int ret;
2220
2221         duplex = hclge_check_speed_dup(duplex, speed);
2222         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2223                 return 0;
2224
2225         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2226         if (ret)
2227                 return ret;
2228
2229         hdev->hw.mac.speed = speed;
2230         hdev->hw.mac.duplex = duplex;
2231
2232         return 0;
2233 }
2234
2235 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2236                                      u8 duplex)
2237 {
2238         struct hclge_vport *vport = hclge_get_vport(handle);
2239         struct hclge_dev *hdev = vport->back;
2240
2241         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2242 }
2243
2244 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2245 {
2246         struct hclge_config_auto_neg_cmd *req;
2247         struct hclge_desc desc;
2248         u32 flag = 0;
2249         int ret;
2250
2251         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2252
2253         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2254         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2255         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2256
2257         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2258         if (ret)
2259                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2260                         ret);
2261
2262         return ret;
2263 }
2264
2265 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2266 {
2267         struct hclge_vport *vport = hclge_get_vport(handle);
2268         struct hclge_dev *hdev = vport->back;
2269
2270         if (!hdev->hw.mac.support_autoneg) {
2271                 if (enable) {
2272                         dev_err(&hdev->pdev->dev,
2273                                 "autoneg is not supported by current port\n");
2274                         return -EOPNOTSUPP;
2275                 } else {
2276                         return 0;
2277                 }
2278         }
2279
2280         return hclge_set_autoneg_en(hdev, enable);
2281 }
2282
2283 static int hclge_get_autoneg(struct hnae3_handle *handle)
2284 {
2285         struct hclge_vport *vport = hclge_get_vport(handle);
2286         struct hclge_dev *hdev = vport->back;
2287         struct phy_device *phydev = hdev->hw.mac.phydev;
2288
2289         if (phydev)
2290                 return phydev->autoneg;
2291
2292         return hdev->hw.mac.autoneg;
2293 }
2294
2295 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2296 {
2297         struct hclge_vport *vport = hclge_get_vport(handle);
2298         struct hclge_dev *hdev = vport->back;
2299         int ret;
2300
2301         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2302
2303         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2304         if (ret)
2305                 return ret;
2306         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2307 }
2308
2309 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2310 {
2311         struct hclge_config_fec_cmd *req;
2312         struct hclge_desc desc;
2313         int ret;
2314
2315         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2316
2317         req = (struct hclge_config_fec_cmd *)desc.data;
2318         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2319                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2320         if (fec_mode & BIT(HNAE3_FEC_RS))
2321                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2322                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2323         if (fec_mode & BIT(HNAE3_FEC_BASER))
2324                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2325                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2326
2327         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2328         if (ret)
2329                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2330
2331         return ret;
2332 }
2333
2334 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2335 {
2336         struct hclge_vport *vport = hclge_get_vport(handle);
2337         struct hclge_dev *hdev = vport->back;
2338         struct hclge_mac *mac = &hdev->hw.mac;
2339         int ret;
2340
2341         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2342                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2343                 return -EINVAL;
2344         }
2345
2346         ret = hclge_set_fec_hw(hdev, fec_mode);
2347         if (ret)
2348                 return ret;
2349
2350         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2351         return 0;
2352 }
2353
2354 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2355                           u8 *fec_mode)
2356 {
2357         struct hclge_vport *vport = hclge_get_vport(handle);
2358         struct hclge_dev *hdev = vport->back;
2359         struct hclge_mac *mac = &hdev->hw.mac;
2360
2361         if (fec_ability)
2362                 *fec_ability = mac->fec_ability;
2363         if (fec_mode)
2364                 *fec_mode = mac->fec_mode;
2365 }
2366
2367 static int hclge_mac_init(struct hclge_dev *hdev)
2368 {
2369         struct hclge_mac *mac = &hdev->hw.mac;
2370         int ret;
2371
2372         hdev->support_sfp_query = true;
2373         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2374         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2375                                          hdev->hw.mac.duplex);
2376         if (ret) {
2377                 dev_err(&hdev->pdev->dev,
2378                         "Config mac speed dup fail ret=%d\n", ret);
2379                 return ret;
2380         }
2381
2382         mac->link = 0;
2383
2384         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2385                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2386                 if (ret) {
2387                         dev_err(&hdev->pdev->dev,
2388                                 "Fec mode init fail, ret = %d\n", ret);
2389                         return ret;
2390                 }
2391         }
2392
2393         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2394         if (ret) {
2395                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2396                 return ret;
2397         }
2398
2399         ret = hclge_buffer_alloc(hdev);
2400         if (ret)
2401                 dev_err(&hdev->pdev->dev,
2402                         "allocate buffer fail, ret=%d\n", ret);
2403
2404         return ret;
2405 }
2406
2407 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2408 {
2409         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2410             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2411                 schedule_work(&hdev->mbx_service_task);
2412 }
2413
2414 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2415 {
2416         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2417             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2418                 schedule_work(&hdev->rst_service_task);
2419 }
2420
2421 static void hclge_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2424             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2425             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2426                 (void)schedule_work(&hdev->service_task);
2427 }
2428
2429 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2430 {
2431         struct hclge_link_status_cmd *req;
2432         struct hclge_desc desc;
2433         int link_status;
2434         int ret;
2435
2436         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438         if (ret) {
2439                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2440                         ret);
2441                 return ret;
2442         }
2443
2444         req = (struct hclge_link_status_cmd *)desc.data;
2445         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2446
2447         return !!link_status;
2448 }
2449
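/* the port is considered up only when the MAC reports link up and, if an
 * external PHY is attached, the PHY is running and reports link up as well
 */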
2450 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2451 {
2452         int mac_state;
2453         int link_stat;
2454
2455         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2456                 return 0;
2457
2458         mac_state = hclge_get_mac_link_status(hdev);
2459
2460         if (hdev->hw.mac.phydev) {
2461                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2462                         link_stat = mac_state &
2463                                 hdev->hw.mac.phydev->link;
2464                 else
2465                         link_stat = 0;
2466
2467         } else {
2468                 link_stat = mac_state;
2469         }
2470
2471         return !!link_stat;
2472 }
2473
2474 static void hclge_update_link_status(struct hclge_dev *hdev)
2475 {
2476         struct hnae3_client *rclient = hdev->roce_client;
2477         struct hnae3_client *client = hdev->nic_client;
2478         struct hnae3_handle *rhandle;
2479         struct hnae3_handle *handle;
2480         int state;
2481         int i;
2482
2483         if (!client)
2484                 return;
2485         state = hclge_get_mac_phy_link(hdev);
2486         if (state != hdev->hw.mac.link) {
2487                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2488                         handle = &hdev->vport[i].nic;
2489                         client->ops->link_status_change(handle, state);
2490                         hclge_config_mac_tnl_int(hdev, state);
2491                         rhandle = &hdev->vport[i].roce;
2492                         if (rclient && rclient->ops->link_status_change)
2493                                 rclient->ops->link_status_change(rhandle,
2494                                                                  state);
2495                 }
2496                 hdev->hw.mac.link = state;
2497         }
2498 }
2499
2500 static void hclge_update_port_capability(struct hclge_mac *mac)
2501 {
2502         /* update fec ability by speed */
2503         hclge_convert_setting_fec(mac);
2504
2505         /* firmware cannot identify backplane type, the media type
2506          * read from configuration can help to deal with it
2507          */
2508         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2509             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2510                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2511         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2512                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2513
2514         if (mac->support_autoneg) {
2515                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2516                 linkmode_copy(mac->advertising, mac->supported);
2517         } else {
2518                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2519                                    mac->supported);
2520                 linkmode_zero(mac->advertising);
2521         }
2522 }
2523
2524 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2525 {
2526         struct hclge_sfp_info_cmd *resp;
2527         struct hclge_desc desc;
2528         int ret;
2529
2530         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2531         resp = (struct hclge_sfp_info_cmd *)desc.data;
2532         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2533         if (ret == -EOPNOTSUPP) {
2534                 dev_warn(&hdev->pdev->dev,
2535                          "IMP does not support get SFP speed %d\n", ret);
2536                 return ret;
2537         } else if (ret) {
2538                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2539                 return ret;
2540         }
2541
2542         *speed = le32_to_cpu(resp->speed);
2543
2544         return 0;
2545 }
2546
2547 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2548 {
2549         struct hclge_sfp_info_cmd *resp;
2550         struct hclge_desc desc;
2551         int ret;
2552
2553         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2554         resp = (struct hclge_sfp_info_cmd *)desc.data;
2555
2556         resp->query_type = QUERY_ACTIVE_SPEED;
2557
2558         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2559         if (ret == -EOPNOTSUPP) {
2560                 dev_warn(&hdev->pdev->dev,
2561                          "IMP does not support get SFP info %d\n", ret);
2562                 return ret;
2563         } else if (ret) {
2564                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2565                 return ret;
2566         }
2567
2568         mac->speed = le32_to_cpu(resp->speed);
2569         /* if resp->speed_ability is 0, it means it is an old version of
2570          * firmware, so do not update these params
2571          */
2572         if (resp->speed_ability) {
2573                 mac->module_type = le32_to_cpu(resp->module_type);
2574                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2575                 mac->autoneg = resp->autoneg;
2576                 mac->support_autoneg = resp->autoneg_ability;
2577                 if (!resp->active_fec)
2578                         mac->fec_mode = 0;
2579                 else
2580                         mac->fec_mode = BIT(resp->active_fec);
2581         } else {
2582                 mac->speed_type = QUERY_SFP_SPEED;
2583         }
2584
2585         return 0;
2586 }
2587
2588 static int hclge_update_port_info(struct hclge_dev *hdev)
2589 {
2590         struct hclge_mac *mac = &hdev->hw.mac;
2591         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2592         int ret;
2593
2594         /* get the port info from SFP cmd if not copper port */
2595         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2596                 return 0;
2597
2598         /* if IMP does not support get SFP/qSFP info, return directly */
2599         if (!hdev->support_sfp_query)
2600                 return 0;
2601
2602         if (hdev->pdev->revision >= 0x21)
2603                 ret = hclge_get_sfp_info(hdev, mac);
2604         else
2605                 ret = hclge_get_sfp_speed(hdev, &speed);
2606
2607         if (ret == -EOPNOTSUPP) {
2608                 hdev->support_sfp_query = false;
2609                 return ret;
2610         } else if (ret) {
2611                 return ret;
2612         }
2613
2614         if (hdev->pdev->revision >= 0x21) {
2615                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2616                         hclge_update_port_capability(mac);
2617                         return 0;
2618                 }
2619                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2620                                                HCLGE_MAC_FULL);
2621         } else {
2622                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2623                         return 0; /* do nothing if no SFP */
2624
2625                 /* must config full duplex for SFP */
2626                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2627         }
2628 }
2629
2630 static int hclge_get_status(struct hnae3_handle *handle)
2631 {
2632         struct hclge_vport *vport = hclge_get_vport(handle);
2633         struct hclge_dev *hdev = vport->back;
2634
2635         hclge_update_link_status(hdev);
2636
2637         return hdev->hw.mac.link;
2638 }
2639
2640 static void hclge_service_timer(struct timer_list *t)
2641 {
2642         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2643
2644         mod_timer(&hdev->service_timer, jiffies + HZ);
2645         hdev->hw_stats.stats_timer++;
2646         hdev->fd_arfs_expire_timer++;
2647         hclge_task_schedule(hdev);
2648 }
2649
2650 static void hclge_service_complete(struct hclge_dev *hdev)
2651 {
2652         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2653
2654         /* Flush memory before next watchdog */
2655         smp_mb__before_atomic();
2656         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2657 }
2658
2659 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2660 {
2661         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2662
2663         /* fetch the events from their corresponding regs */
2664         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2665         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2666         msix_src_reg = hclge_read_dev(&hdev->hw,
2667                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2668
2669         /* Assumption: If by any chance reset and mailbox events are reported
2670          * together then we will only process reset event in this go and will
2671          * defer the processing of the mailbox events. Since we would not have
2672          * cleared the RX CMDQ event this time, we would receive another
2673          * interrupt from H/W just for the mailbox.
2674          */
2675
2676         /* check for vector0 reset event sources */
2677         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2678                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2679                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2680                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2681                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2682                 hdev->rst_stats.imp_rst_cnt++;
2683                 return HCLGE_VECTOR0_EVENT_RST;
2684         }
2685
2686         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2687                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2688                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2689                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2690                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2691                 hdev->rst_stats.global_rst_cnt++;
2692                 return HCLGE_VECTOR0_EVENT_RST;
2693         }
2694
2695         /* check for vector0 msix event source */
2696         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2697                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2698                         msix_src_reg);
2699                 return HCLGE_VECTOR0_EVENT_ERR;
2700         }
2701
2702         /* check for vector0 mailbox(=CMDQ RX) event source */
2703         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2704                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2705                 *clearval = cmdq_src_reg;
2706                 return HCLGE_VECTOR0_EVENT_MBX;
2707         }
2708
2709         /* print other vector0 event source */
2710         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2711                 cmdq_src_reg, msix_src_reg);
2712         return HCLGE_VECTOR0_EVENT_OTHER;
2713 }
2714
2715 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2716                                     u32 regclr)
2717 {
2718         switch (event_type) {
2719         case HCLGE_VECTOR0_EVENT_RST:
2720                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2721                 break;
2722         case HCLGE_VECTOR0_EVENT_MBX:
2723                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2724                 break;
2725         default:
2726                 break;
2727         }
2728 }
2729
2730 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2731 {
2732         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2733                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2734                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2735                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2736         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2737 }
2738
2739 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2740 {
2741         writel(enable ? 1 : 0, vector->addr);
2742 }
2743
2744 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2745 {
2746         struct hclge_dev *hdev = data;
2747         u32 event_cause;
2748         u32 clearval;
2749
2750         hclge_enable_vector(&hdev->misc_vector, false);
2751         event_cause = hclge_check_event_cause(hdev, &clearval);
2752
2753         /* vector 0 interrupt is shared with reset and mailbox source events. */
2754         switch (event_cause) {
2755         case HCLGE_VECTOR0_EVENT_ERR:
2756                 /* we do not know what type of reset is required now. This could
2757                  * only be decided after we fetch the type of errors which
2758                  * caused this event. Therefore, we will do the following for now:
2759                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2760                  *    type of reset to be used is deferred.
2761                  * 2. Schedule the reset service task.
2762                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
2763                  *    will fetch the correct type of reset. This is done by
2764                  *    first decoding the types of errors.
2765                  */
2766                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2767                 /* fall through */
2768         case HCLGE_VECTOR0_EVENT_RST:
2769                 hclge_reset_task_schedule(hdev);
2770                 break;
2771         case HCLGE_VECTOR0_EVENT_MBX:
2772                 /* If we are here then,
2773                  * 1. Either we are not handling any mbx task and none is
2774                  *    scheduled either
2775                  *                        OR
2776                  * 2. We could be handling an mbx task but nothing more is
2777                  *    scheduled.
2778                  * In both cases, we should schedule the mbx task as there are
2779                  * more mbx messages reported by this interrupt.
2780                  */
2781                 hclge_mbx_task_schedule(hdev);
2782                 break;
2783         default:
2784                 dev_warn(&hdev->pdev->dev,
2785                          "received unknown or unhandled event of vector0\n");
2786                 break;
2787         }
2788
2789         /* clear the source of interrupt if it is not caused by reset */
2790         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2791                 hclge_clear_event_cause(hdev, event_cause, clearval);
2792                 hclge_enable_vector(&hdev->misc_vector, true);
2793         }
2794
2795         return IRQ_HANDLED;
2796 }
2797
2798 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2799 {
2800         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2801                 dev_warn(&hdev->pdev->dev,
2802                          "vector(vector_id %d) has been freed.\n", vector_id);
2803                 return;
2804         }
2805
2806         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2807         hdev->num_msi_left += 1;
2808         hdev->num_msi_used -= 1;
2809 }
2810
2811 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2812 {
2813         struct hclge_misc_vector *vector = &hdev->misc_vector;
2814
2815         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2816
2817         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2818         hdev->vector_status[0] = 0;
2819
2820         hdev->num_msi_left -= 1;
2821         hdev->num_msi_used += 1;
2822 }
2823
2824 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2825 {
2826         int ret;
2827
2828         hclge_get_misc_vector(hdev);
2829
2830         /* this will be freed explicitly in hclge_misc_irq_uninit() */
2831         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2832                           0, "hclge_misc", hdev);
2833         if (ret) {
2834                 hclge_free_vector(hdev, 0);
2835                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2836                         hdev->misc_vector.vector_irq);
2837         }
2838
2839         return ret;
2840 }
2841
2842 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2843 {
2844         free_irq(hdev->misc_vector.vector_irq, hdev);
2845         hclge_free_vector(hdev, 0);
2846 }
2847
2848 int hclge_notify_client(struct hclge_dev *hdev,
2849                         enum hnae3_reset_notify_type type)
2850 {
2851         struct hnae3_client *client = hdev->nic_client;
2852         u16 i;
2853
2854         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2855                 return 0;
2856
2857         if (!client->ops->reset_notify)
2858                 return -EOPNOTSUPP;
2859
2860         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2861                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2862                 int ret;
2863
2864                 ret = client->ops->reset_notify(handle, type);
2865                 if (ret) {
2866                         dev_err(&hdev->pdev->dev,
2867                                 "notify nic client failed %d(%d)\n", type, ret);
2868                         return ret;
2869                 }
2870         }
2871
2872         return 0;
2873 }
2874
2875 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2876                                     enum hnae3_reset_notify_type type)
2877 {
2878         struct hnae3_client *client = hdev->roce_client;
2879         int ret = 0;
2880         u16 i;
2881
2882         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2883                 return 0;
2884
2885         if (!client->ops->reset_notify)
2886                 return -EOPNOTSUPP;
2887
2888         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2889                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2890
2891                 ret = client->ops->reset_notify(handle, type);
2892                 if (ret) {
2893                         dev_err(&hdev->pdev->dev,
2894                                 "notify roce client failed %d(%d)",
2895                                 type, ret);
2896                         return ret;
2897                 }
2898         }
2899
2900         return ret;
2901 }
2902
2903 static int hclge_reset_wait(struct hclge_dev *hdev)
2904 {
2905 #define HCLGE_RESET_WATI_MS     100
2906 #define HCLGE_RESET_WAIT_CNT    200
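             /* poll every HCLGE_RESET_WATI_MS (100 ms) up to
              * HCLGE_RESET_WAIT_CNT (200) times, i.e. wait at most about
              * 20 seconds for the hardware to report that the reset is done
              */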
2907         u32 val, reg, reg_bit;
2908         u32 cnt = 0;
2909
2910         switch (hdev->reset_type) {
2911         case HNAE3_IMP_RESET:
2912                 reg = HCLGE_GLOBAL_RESET_REG;
2913                 reg_bit = HCLGE_IMP_RESET_BIT;
2914                 break;
2915         case HNAE3_GLOBAL_RESET:
2916                 reg = HCLGE_GLOBAL_RESET_REG;
2917                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2918                 break;
2919         case HNAE3_FUNC_RESET:
2920                 reg = HCLGE_FUN_RST_ING;
2921                 reg_bit = HCLGE_FUN_RST_ING_B;
2922                 break;
2923         case HNAE3_FLR_RESET:
2924                 break;
2925         default:
2926                 dev_err(&hdev->pdev->dev,
2927                         "Wait for unsupported reset type: %d\n",
2928                         hdev->reset_type);
2929                 return -EINVAL;
2930         }
2931
2932         if (hdev->reset_type == HNAE3_FLR_RESET) {
2933                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2934                        cnt++ < HCLGE_RESET_WAIT_CNT)
2935                         msleep(HCLGE_RESET_WATI_MS);
2936
2937                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2938                         dev_err(&hdev->pdev->dev,
2939                                 "flr wait timeout: %d\n", cnt);
2940                         return -EBUSY;
2941                 }
2942
2943                 return 0;
2944         }
2945
2946         val = hclge_read_dev(&hdev->hw, reg);
2947         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2948                 msleep(HCLGE_RESET_WATI_MS);
2949                 val = hclge_read_dev(&hdev->hw, reg);
2950                 cnt++;
2951         }
2952
2953         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2954                 dev_warn(&hdev->pdev->dev,
2955                          "Wait for reset timeout: %d\n", hdev->reset_type);
2956                 return -EBUSY;
2957         }
2958
2959         return 0;
2960 }
2961
2962 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2963 {
2964         struct hclge_vf_rst_cmd *req;
2965         struct hclge_desc desc;
2966
2967         req = (struct hclge_vf_rst_cmd *)desc.data;
2968         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2969         req->dest_vfid = func_id;
2970
2971         if (reset)
2972                 req->vf_rst = 0x1;
2973
2974         return hclge_cmd_send(&hdev->hw, &desc, 1);
2975 }
2976
2977 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2978 {
2979         int i;
2980
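             /* vports 0 .. num_vmdq_vport belong to the PF (those are the
              * ones hclge_notify_client() walks); VF vports start right
              * after them
              */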
2981         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2982                 struct hclge_vport *vport = &hdev->vport[i];
2983                 int ret;
2984
2985                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2986                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2987                 if (ret) {
2988                         dev_err(&hdev->pdev->dev,
2989                                 "set vf(%d) rst failed %d!\n",
2990                                 vport->vport_id, ret);
2991                         return ret;
2992                 }
2993
2994                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2995                         continue;
2996
2997                 /* Inform VF to process the reset.
2998                  * hclge_inform_reset_assert_to_vf may fail if VF
2999                  * driver is not loaded.
3000                  */
3001                 ret = hclge_inform_reset_assert_to_vf(vport);
3002                 if (ret)
3003                         dev_warn(&hdev->pdev->dev,
3004                                  "inform reset to vf(%d) failed %d!\n",
3005                                  vport->vport_id, ret);
3006         }
3007
3008         return 0;
3009 }
3010
3011 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3012 {
3013         struct hclge_desc desc;
3014         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3015         int ret;
3016
3017         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3018         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3019         req->fun_reset_vfid = func_id;
3020
3021         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3022         if (ret)
3023                 dev_err(&hdev->pdev->dev,
3024                         "send function reset cmd fail, status =%d\n", ret);
3025
3026         return ret;
3027 }
3028
3029 static void hclge_do_reset(struct hclge_dev *hdev)
3030 {
3031         struct hnae3_handle *handle = &hdev->vport[0].nic;
3032         struct pci_dev *pdev = hdev->pdev;
3033         u32 val;
3034
3035         if (hclge_get_hw_reset_stat(handle)) {
3036                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3037                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3038                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3039                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3040                 return;
3041         }
3042
3043         switch (hdev->reset_type) {
3044         case HNAE3_GLOBAL_RESET:
3045                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3046                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3047                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3048                 dev_info(&pdev->dev, "Global Reset requested\n");
3049                 break;
3050         case HNAE3_FUNC_RESET:
3051                 dev_info(&pdev->dev, "PF Reset requested\n");
3052                 /* schedule again to check later */
3053                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3054                 hclge_reset_task_schedule(hdev);
3055                 break;
3056         case HNAE3_FLR_RESET:
3057                 dev_info(&pdev->dev, "FLR requested\n");
3058                 /* schedule again to check later */
3059                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3060                 hclge_reset_task_schedule(hdev);
3061                 break;
3062         default:
3063                 dev_warn(&pdev->dev,
3064                          "Unsupported reset type: %d\n", hdev->reset_type);
3065                 break;
3066         }
3067 }
3068
3069 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3070                                                    unsigned long *addr)
3071 {
3072         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3073
3074         /* first, resolve any unknown reset type to the known type(s) */
3075         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3076                 /* we will intentionally ignore any errors from this function
3077                  * as we will end up in *some* reset request in any case
3078                  */
3079                 hclge_handle_hw_msix_error(hdev, addr);
3080                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3081                 /* We deferred the clearing of the error event which caused
3082                  * the interrupt since it was not possible to do that in
3083                  * interrupt context (and this is the reason we introduced the
3084                  * new UNKNOWN reset type). Now that the errors have been
3085                  * handled and cleared in hardware, we can safely enable
3086                  * interrupts. This is an exception to the norm.
3087                  */
3088                 hclge_enable_vector(&hdev->misc_vector, true);
3089         }
3090
3091         /* return the highest priority reset level amongst all */
3092         if (test_bit(HNAE3_IMP_RESET, addr)) {
3093                 rst_level = HNAE3_IMP_RESET;
3094                 clear_bit(HNAE3_IMP_RESET, addr);
3095                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3096                 clear_bit(HNAE3_FUNC_RESET, addr);
3097         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3098                 rst_level = HNAE3_GLOBAL_RESET;
3099                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3100                 clear_bit(HNAE3_FUNC_RESET, addr);
3101         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3102                 rst_level = HNAE3_FUNC_RESET;
3103                 clear_bit(HNAE3_FUNC_RESET, addr);
3104         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3105                 rst_level = HNAE3_FLR_RESET;
3106                 clear_bit(HNAE3_FLR_RESET, addr);
3107         }
3108
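             /* do not report a reset of lower level than the one already
              * being handled; the ongoing higher-level reset covers it
              */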
3109         if (hdev->reset_type != HNAE3_NONE_RESET &&
3110             rst_level < hdev->reset_type)
3111                 return HNAE3_NONE_RESET;
3112
3113         return rst_level;
3114 }
3115
3116 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3117 {
3118         u32 clearval = 0;
3119
3120         switch (hdev->reset_type) {
3121         case HNAE3_IMP_RESET:
3122                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3123                 break;
3124         case HNAE3_GLOBAL_RESET:
3125                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3126                 break;
3127         default:
3128                 break;
3129         }
3130
3131         if (!clearval)
3132                 return;
3133
3134         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3135         hclge_enable_vector(&hdev->misc_vector, true);
3136 }
3137
3138 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3139 {
3140         int ret = 0;
3141
3142         switch (hdev->reset_type) {
3143         case HNAE3_FUNC_RESET:
3144                 /* fall through */
3145         case HNAE3_FLR_RESET:
3146                 ret = hclge_set_all_vf_rst(hdev, true);
3147                 break;
3148         default:
3149                 break;
3150         }
3151
3152         return ret;
3153 }
3154
3155 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3156 {
3157 #define HCLGE_RESET_SYNC_TIME 100
3158
3159         u32 reg_val;
3160         int ret = 0;
3161
3162         switch (hdev->reset_type) {
3163         case HNAE3_FUNC_RESET:
3164                 /* For now there is no mechanism for the PF to know if the VF
3165                  * has stopped IO, so just wait 100 ms for the VF to stop IO
3166                  */
3167                 msleep(HCLGE_RESET_SYNC_TIME);
3168                 ret = hclge_func_reset_cmd(hdev, 0);
3169                 if (ret) {
3170                         dev_err(&hdev->pdev->dev,
3171                                 "asserting function reset fail %d!\n", ret);
3172                         return ret;
3173                 }
3174
3175                 /* After performing PF reset, it is not necessary to do the
3176                  * mailbox handling or send any command to firmware, because
3177                  * any mailbox handling or command to firmware is only valid
3178                  * after hclge_cmd_init is called.
3179                  */
3180                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3181                 hdev->rst_stats.pf_rst_cnt++;
3182                 break;
3183         case HNAE3_FLR_RESET:
3184                 /* For now there is no mechanism for the PF to know if the VF
3185                  * has stopped IO, so just wait 100 ms for the VF to stop IO
3186                  */
3187                 msleep(HCLGE_RESET_SYNC_TIME);
3188                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3189                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3190                 hdev->rst_stats.flr_rst_cnt++;
3191                 break;
3192         case HNAE3_IMP_RESET:
3193                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3194                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3195                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3196                 break;
3197         default:
3198                 break;
3199         }
3200
3201         /* inform hardware that preparatory work is done */
3202         msleep(HCLGE_RESET_SYNC_TIME);
3203         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3204                         HCLGE_NIC_CMQ_ENABLE);
3205         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3206
3207         return ret;
3208 }
3209
3210 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3211 {
3212 #define MAX_RESET_FAIL_CNT 5
3213
3214         if (hdev->reset_pending) {
3215                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3216                          hdev->reset_pending);
3217                 return true;
3218         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3219                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3220                     BIT(HCLGE_IMP_RESET_BIT))) {
3221                 dev_info(&hdev->pdev->dev,
3222                          "reset failed because IMP Reset is pending\n");
3223                 hclge_clear_reset_cause(hdev);
3224                 return false;
3225         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3226                 hdev->reset_fail_cnt++;
3227                 if (is_timeout) {
3228                         set_bit(hdev->reset_type, &hdev->reset_pending);
3229                         dev_info(&hdev->pdev->dev,
3230                                  "re-schedule to wait for hw reset done\n");
3231                         return true;
3232                 }
3233
3234                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3235                 hclge_clear_reset_cause(hdev);
3236                 mod_timer(&hdev->reset_timer,
3237                           jiffies + HCLGE_RESET_INTERVAL);
3238
3239                 return false;
3240         }
3241
3242         hclge_clear_reset_cause(hdev);
3243         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3244         return false;
3245 }
3246
3247 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3248 {
3249         int ret = 0;
3250
3251         switch (hdev->reset_type) {
3252         case HNAE3_FUNC_RESET:
3253                 /* fall through */
3254         case HNAE3_FLR_RESET:
3255                 ret = hclge_set_all_vf_rst(hdev, false);
3256                 break;
3257         default:
3258                 break;
3259         }
3260
3261         return ret;
3262 }
3263
3264 static int hclge_reset_stack(struct hclge_dev *hdev)
3265 {
3266         int ret;
3267
3268         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3269         if (ret)
3270                 return ret;
3271
3272         ret = hclge_reset_ae_dev(hdev->ae_dev);
3273         if (ret)
3274                 return ret;
3275
3276         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3277         if (ret)
3278                 return ret;
3279
3280         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3281 }
3282
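     /* Top-level PF reset flow: bring the RoCE and NIC clients down,
      * prepare and assert the reset, wait for the hardware to finish,
      * re-initialize the stack, then bring the clients back up. On
      * failure hclge_reset_err_handle() decides whether to re-schedule.
      */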
3283 static void hclge_reset(struct hclge_dev *hdev)
3284 {
3285         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3286         bool is_timeout = false;
3287         int ret;
3288
3289         /* Initialize ae_dev reset status as well, in case enet layer wants to
3290          * know if device is undergoing reset
3291          */
3292         ae_dev->reset_type = hdev->reset_type;
3293         hdev->rst_stats.reset_cnt++;
3294         /* perform reset of the stack & ae device for a client */
3295         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3296         if (ret)
3297                 goto err_reset;
3298
3299         ret = hclge_reset_prepare_down(hdev);
3300         if (ret)
3301                 goto err_reset;
3302
3303         rtnl_lock();
3304         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3305         if (ret)
3306                 goto err_reset_lock;
3307
3308         rtnl_unlock();
3309
3310         ret = hclge_reset_prepare_wait(hdev);
3311         if (ret)
3312                 goto err_reset;
3313
3314         if (hclge_reset_wait(hdev)) {
3315                 is_timeout = true;
3316                 goto err_reset;
3317         }
3318
3319         hdev->rst_stats.hw_reset_done_cnt++;
3320
3321         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3322         if (ret)
3323                 goto err_reset;
3324
3325         rtnl_lock();
3326
3327         ret = hclge_reset_stack(hdev);
3328         if (ret)
3329                 goto err_reset_lock;
3330
3331         hclge_clear_reset_cause(hdev);
3332
3333         ret = hclge_reset_prepare_up(hdev);
3334         if (ret)
3335                 goto err_reset_lock;
3336
3337         rtnl_unlock();
3338
3339         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3340         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3341          * times
3342          */
3343         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3344                 goto err_reset;
3345
3346         rtnl_lock();
3347
3348         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3349         if (ret)
3350                 goto err_reset_lock;
3351
3352         rtnl_unlock();
3353
3354         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3355         if (ret)
3356                 goto err_reset;
3357
3358         hdev->last_reset_time = jiffies;
3359         hdev->reset_fail_cnt = 0;
3360         hdev->rst_stats.reset_done_cnt++;
3361         ae_dev->reset_type = HNAE3_NONE_RESET;
3362         del_timer(&hdev->reset_timer);
3363
3364         return;
3365
3366 err_reset_lock:
3367         rtnl_unlock();
3368 err_reset:
3369         if (hclge_reset_err_handle(hdev, is_timeout))
3370                 hclge_reset_task_schedule(hdev);
3371 }
3372
3373 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3374 {
3375         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3376         struct hclge_dev *hdev = ae_dev->priv;
3377
3378         /* We might end up getting called broadly in the two cases below:
3379          * 1. A recoverable error was conveyed through APEI and the only way
3380          *    to restore normalcy is to reset.
3381          * 2. A new reset request from the stack due to timeout
3382          *
3383          * For the first case, the error event might not have an ae handle
3384          * available. Check whether this is a new reset request and we are not
3385          * here just because the last reset attempt did not succeed and the
3386          * watchdog hit us again. We know this if the last reset request did
3387          * not occur very recently (watchdog timer = 5 * HZ, so check after a
3388          * sufficiently large time, say 4 * 5 * HZ). For a new request we reset
3389          * the "reset level" to PF reset. And if it is a repeat of the most
3390          * recent reset request, we want to make sure we throttle it.
3391          * Therefore, we will not allow it again before 3 * HZ has elapsed.
3392          */
3393         if (!handle)
3394                 handle = &hdev->vport[0].nic;
3395
3396         if (time_before(jiffies, (hdev->last_reset_time +
3397                                   HCLGE_RESET_INTERVAL)))
3398                 return;
3399         else if (hdev->default_reset_request)
3400                 hdev->reset_level =
3401                         hclge_get_reset_level(hdev,
3402                                               &hdev->default_reset_request);
3403         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3404                 hdev->reset_level = HNAE3_FUNC_RESET;
3405
3406         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3407                  hdev->reset_level);
3408
3409         /* request reset & schedule reset task */
3410         set_bit(hdev->reset_level, &hdev->reset_request);
3411         hclge_reset_task_schedule(hdev);
3412
3413         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3414                 hdev->reset_level++;
3415 }
3416
3417 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3418                                         enum hnae3_reset_type rst_type)
3419 {
3420         struct hclge_dev *hdev = ae_dev->priv;
3421
3422         set_bit(rst_type, &hdev->default_reset_request);
3423 }
3424
3425 static void hclge_reset_timer(struct timer_list *t)
3426 {
3427         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3428
3429         dev_info(&hdev->pdev->dev,
3430                  "triggering global reset in reset timer\n");
3431         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3432         hclge_reset_event(hdev->pdev, NULL);
3433 }
3434
3435 static void hclge_reset_subtask(struct hclge_dev *hdev)
3436 {
3437         /* check if there is any ongoing reset in the hardware. This status can
3438          * be checked from reset_pending. If there is one, we need to wait for
3439          * the hardware to complete the reset.
3440          *    a. If we are able to figure out in reasonable time that the
3441          *       hardware has fully reset, we can proceed with the driver and
3442          *       client reset.
3443          *    b. else, we can come back later to check this status, so
3444          *       re-schedule now.
3445          */
3446         hdev->last_reset_time = jiffies;
3447         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3448         if (hdev->reset_type != HNAE3_NONE_RESET)
3449                 hclge_reset(hdev);
3450
3451         /* check if we got any *new* reset requests to be honored */
3452         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3453         if (hdev->reset_type != HNAE3_NONE_RESET)
3454                 hclge_do_reset(hdev);
3455
3456         hdev->reset_type = HNAE3_NONE_RESET;
3457 }
3458
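     /* Reset work handler: HCLGE_STATE_RST_HANDLING serializes reset
      * processing, and the SCHED flag is cleared before the subtask runs
      * so that requests arriving meanwhile can re-schedule the work.
      */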
3459 static void hclge_reset_service_task(struct work_struct *work)
3460 {
3461         struct hclge_dev *hdev =
3462                 container_of(work, struct hclge_dev, rst_service_task);
3463
3464         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3465                 return;
3466
3467         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3468
3469         hclge_reset_subtask(hdev);
3470
3471         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3472 }
3473
3474 static void hclge_mailbox_service_task(struct work_struct *work)
3475 {
3476         struct hclge_dev *hdev =
3477                 container_of(work, struct hclge_dev, mbx_service_task);
3478
3479         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3480                 return;
3481
3482         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3483
3484         hclge_mbx_handler(hdev);
3485
3486         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3487 }
3488
3489 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3490 {
3491         int i;
3492
3493         /* start from vport 1, because the PF is always alive */
3494         for (i = 1; i < hdev->num_alloc_vport; i++) {
3495                 struct hclge_vport *vport = &hdev->vport[i];
3496
3497                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3498                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3499
3500                 /* If vf is not alive, set to default value */
3501                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3502                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3503         }
3504 }
3505
3506 static void hclge_service_task(struct work_struct *work)
3507 {
3508         struct hclge_dev *hdev =
3509                 container_of(work, struct hclge_dev, service_task);
3510
3511         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3512                 hclge_update_stats_for_all(hdev);
3513                 hdev->hw_stats.stats_timer = 0;
3514         }
3515
3516         hclge_update_port_info(hdev);
3517         hclge_update_link_status(hdev);
3518         hclge_update_vport_alive(hdev);
3519         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3520                 hclge_rfs_filter_expire(hdev);
3521                 hdev->fd_arfs_expire_timer = 0;
3522         }
3523         hclge_service_complete(hdev);
3524 }
3525
3526 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3527 {
3528         /* VF handle has no client */
3529         if (!handle->client)
3530                 return container_of(handle, struct hclge_vport, nic);
3531         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3532                 return container_of(handle, struct hclge_vport, roce);
3533         else
3534                 return container_of(handle, struct hclge_vport, nic);
3535 }
3536
3537 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3538                             struct hnae3_vector_info *vector_info)
3539 {
3540         struct hclge_vport *vport = hclge_get_vport(handle);
3541         struct hnae3_vector_info *vector = vector_info;
3542         struct hclge_dev *hdev = vport->back;
3543         int alloc = 0;
3544         int i, j;
3545
3546         vector_num = min(hdev->num_msi_left, vector_num);
3547
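             /* vector 0 is reserved for the misc interrupt, so scan from
              * vector 1; each vector's control register is at
              * HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET,
              * offset further by vport_id * HCLGE_VECTOR_VF_OFFSET
              */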
3548         for (j = 0; j < vector_num; j++) {
3549                 for (i = 1; i < hdev->num_msi; i++) {
3550                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3551                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3552                                 vector->io_addr = hdev->hw.io_base +
3553                                         HCLGE_VECTOR_REG_BASE +
3554                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3555                                         vport->vport_id *
3556                                         HCLGE_VECTOR_VF_OFFSET;
3557                                 hdev->vector_status[i] = vport->vport_id;
3558                                 hdev->vector_irq[i] = vector->vector;
3559
3560                                 vector++;
3561                                 alloc++;
3562
3563                                 break;
3564                         }
3565                 }
3566         }
3567         hdev->num_msi_left -= alloc;
3568         hdev->num_msi_used += alloc;
3569
3570         return alloc;
3571 }
3572
3573 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3574 {
3575         int i;
3576
3577         for (i = 0; i < hdev->num_msi; i++)
3578                 if (vector == hdev->vector_irq[i])
3579                         return i;
3580
3581         return -EINVAL;
3582 }
3583
3584 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3585 {
3586         struct hclge_vport *vport = hclge_get_vport(handle);
3587         struct hclge_dev *hdev = vport->back;
3588         int vector_id;
3589
3590         vector_id = hclge_get_vector_index(hdev, vector);
3591         if (vector_id < 0) {
3592                 dev_err(&hdev->pdev->dev,
3593                         "Get vector index fail. vector_id =%d\n", vector_id);
3594                 return vector_id;
3595         }
3596
3597         hclge_free_vector(hdev, vector_id);
3598
3599         return 0;
3600 }
3601
3602 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3603 {
3604         return HCLGE_RSS_KEY_SIZE;
3605 }
3606
3607 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3608 {
3609         return HCLGE_RSS_IND_TBL_SIZE;
3610 }
3611
3612 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3613                                   const u8 hfunc, const u8 *key)
3614 {
3615         struct hclge_rss_config_cmd *req;
3616         struct hclge_desc desc;
3617         int key_offset = 0;
3618         int key_counts;
3619         int key_size;
3620         int ret;
3621
3622         key_counts = HCLGE_RSS_KEY_SIZE;
3623         req = (struct hclge_rss_config_cmd *)desc.data;
3624
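             /* the key is programmed HCLGE_RSS_HASH_KEY_NUM bytes per
              * command descriptor, with key_offset selecting the chunk;
              * e.g. assuming a 40-byte key and 16-byte chunks, three
              * commands are sent (16 + 16 + 8 bytes)
              */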
3625         while (key_counts) {
3626                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3627                                            false);
3628
3629                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3630                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3631
3632                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3633                 memcpy(req->hash_key,
3634                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3635
3636                 key_counts -= key_size;
3637                 key_offset++;
3638                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3639                 if (ret) {
3640                         dev_err(&hdev->pdev->dev,
3641                                 "Configure RSS config fail, status = %d\n",
3642                                 ret);
3643                         return ret;
3644                 }
3645         }
3646         return 0;
3647 }
3648
3649 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3650 {
3651         struct hclge_rss_indirection_table_cmd *req;
3652         struct hclge_desc desc;
3653         int i, j;
3654         int ret;
3655
3656         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3657
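             /* the indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE
              * entries per command, HCLGE_RSS_CFG_TBL_NUM commands in
              * total, with start_table_index advancing by one table's
              * worth each iteration
              */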
3658         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3659                 hclge_cmd_setup_basic_desc
3660                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3661
3662                 req->start_table_index =
3663                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3664                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3665
3666                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3667                         req->rss_result[j] =
3668                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3669
3670                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3671                 if (ret) {
3672                         dev_err(&hdev->pdev->dev,
3673                                 "Configure rss indir table fail, status = %d\n",
3674                                 ret);
3675                         return ret;
3676                 }
3677         }
3678         return 0;
3679 }
3680
3681 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3682                                  u16 *tc_size, u16 *tc_offset)
3683 {
3684         struct hclge_rss_tc_mode_cmd *req;
3685         struct hclge_desc desc;
3686         int ret;
3687         int i;
3688
3689         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3690         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3691
3692         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3693                 u16 mode = 0;
3694
3695                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3696                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3697                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3698                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3699                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3700
3701                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3702         }
3703
3704         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3705         if (ret)
3706                 dev_err(&hdev->pdev->dev,
3707                         "Configure rss tc mode fail, status = %d\n", ret);
3708
3709         return ret;
3710 }
3711
3712 static void hclge_get_rss_type(struct hclge_vport *vport)
3713 {
3714         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3715             vport->rss_tuple_sets.ipv4_udp_en ||
3716             vport->rss_tuple_sets.ipv4_sctp_en ||
3717             vport->rss_tuple_sets.ipv6_tcp_en ||
3718             vport->rss_tuple_sets.ipv6_udp_en ||
3719             vport->rss_tuple_sets.ipv6_sctp_en)
3720                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3721         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3722                  vport->rss_tuple_sets.ipv6_fragment_en)
3723                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3724         else
3725                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3726 }
3727
3728 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3729 {
3730         struct hclge_rss_input_tuple_cmd *req;
3731         struct hclge_desc desc;
3732         int ret;
3733
3734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3735
3736         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3737
3738         /* Get the tuple cfg from pf */
3739         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3740         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3741         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3742         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3743         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3744         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3745         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3746         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3747         hclge_get_rss_type(&hdev->vport[0]);
3748         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3749         if (ret)
3750                 dev_err(&hdev->pdev->dev,
3751                         "Configure rss input fail, status = %d\n", ret);
3752         return ret;
3753 }
3754
3755 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3756                          u8 *key, u8 *hfunc)
3757 {
3758         struct hclge_vport *vport = hclge_get_vport(handle);
3759         int i;
3760
3761         /* Get hash algorithm */
3762         if (hfunc) {
3763                 switch (vport->rss_algo) {
3764                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3765                         *hfunc = ETH_RSS_HASH_TOP;
3766                         break;
3767                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3768                         *hfunc = ETH_RSS_HASH_XOR;
3769                         break;
3770                 default:
3771                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3772                         break;
3773                 }
3774         }
3775
3776         /* Get the RSS Key required by the user */
3777         if (key)
3778                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3779
3780         /* Get indirect table */
3781         if (indir)
3782                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3783                         indir[i] =  vport->rss_indirection_tbl[i];
3784
3785         return 0;
3786 }
3787
3788 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3789                          const  u8 *key, const  u8 hfunc)
3790 {
3791         struct hclge_vport *vport = hclge_get_vport(handle);
3792         struct hclge_dev *hdev = vport->back;
3793         u8 hash_algo;
3794         int ret, i;
3795
3796         /* Set the RSS Hash Key if specified by the user */
3797         if (key) {
3798                 switch (hfunc) {
3799                 case ETH_RSS_HASH_TOP:
3800                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3801                         break;
3802                 case ETH_RSS_HASH_XOR:
3803                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3804                         break;
3805                 case ETH_RSS_HASH_NO_CHANGE:
3806                         hash_algo = vport->rss_algo;
3807                         break;
3808                 default:
3809                         return -EINVAL;
3810                 }
3811
3812                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3813                 if (ret)
3814                         return ret;
3815
3816                 /* Update the shadow RSS key with the user specified key */
3817                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3818                 vport->rss_algo = hash_algo;
3819         }
3820
3821         /* Update the shadow RSS table with user specified qids */
3822         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3823                 vport->rss_indirection_tbl[i] = indir[i];
3824
3825         /* Update the hardware */
3826         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3827 }
3828
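     /* translate the ethtool RXH_* flags in nfc->data into the driver's
      * HCLGE_*_BIT tuple bits; SCTP flows additionally hash on the
      * verification tag
      */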
3829 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3830 {
3831         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3832
3833         if (nfc->data & RXH_L4_B_2_3)
3834                 hash_sets |= HCLGE_D_PORT_BIT;
3835         else
3836                 hash_sets &= ~HCLGE_D_PORT_BIT;
3837
3838         if (nfc->data & RXH_IP_SRC)
3839                 hash_sets |= HCLGE_S_IP_BIT;
3840         else
3841                 hash_sets &= ~HCLGE_S_IP_BIT;
3842
3843         if (nfc->data & RXH_IP_DST)
3844                 hash_sets |= HCLGE_D_IP_BIT;
3845         else
3846                 hash_sets &= ~HCLGE_D_IP_BIT;
3847
3848         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3849                 hash_sets |= HCLGE_V_TAG_BIT;
3850
3851         return hash_sets;
3852 }
3853
3854 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3855                                struct ethtool_rxnfc *nfc)
3856 {
3857         struct hclge_vport *vport = hclge_get_vport(handle);
3858         struct hclge_dev *hdev = vport->back;
3859         struct hclge_rss_input_tuple_cmd *req;
3860         struct hclge_desc desc;
3861         u8 tuple_sets;
3862         int ret;
3863
3864         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3865                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3866                 return -EINVAL;
3867
3868         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3869         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3870
3871         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3872         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3873         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3874         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3875         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3876         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3877         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3878         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3879
3880         tuple_sets = hclge_get_rss_hash_bits(nfc);
3881         switch (nfc->flow_type) {
3882         case TCP_V4_FLOW:
3883                 req->ipv4_tcp_en = tuple_sets;
3884                 break;
3885         case TCP_V6_FLOW:
3886                 req->ipv6_tcp_en = tuple_sets;
3887                 break;
3888         case UDP_V4_FLOW:
3889                 req->ipv4_udp_en = tuple_sets;
3890                 break;
3891         case UDP_V6_FLOW:
3892                 req->ipv6_udp_en = tuple_sets;
3893                 break;
3894         case SCTP_V4_FLOW:
3895                 req->ipv4_sctp_en = tuple_sets;
3896                 break;
3897         case SCTP_V6_FLOW:
3898                 if ((nfc->data & RXH_L4_B_0_1) ||
3899                     (nfc->data & RXH_L4_B_2_3))
3900                         return -EINVAL;
3901
3902                 req->ipv6_sctp_en = tuple_sets;
3903                 break;
3904         case IPV4_FLOW:
3905                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3906                 break;
3907         case IPV6_FLOW:
3908                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3909                 break;
3910         default:
3911                 return -EINVAL;
3912         }
3913
3914         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3915         if (ret) {
3916                 dev_err(&hdev->pdev->dev,
3917                         "Set rss tuple fail, status = %d\n", ret);
3918                 return ret;
3919         }
3920
3921         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3922         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3923         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3924         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3925         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3926         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3927         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3928         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3929         hclge_get_rss_type(vport);
3930         return 0;
3931 }
3932
3933 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3934                                struct ethtool_rxnfc *nfc)
3935 {
3936         struct hclge_vport *vport = hclge_get_vport(handle);
3937         u8 tuple_sets;
3938
3939         nfc->data = 0;
3940
3941         switch (nfc->flow_type) {
3942         case TCP_V4_FLOW:
3943                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3944                 break;
3945         case UDP_V4_FLOW:
3946                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3947                 break;
3948         case TCP_V6_FLOW:
3949                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3950                 break;
3951         case UDP_V6_FLOW:
3952                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3953                 break;
3954         case SCTP_V4_FLOW:
3955                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3956                 break;
3957         case SCTP_V6_FLOW:
3958                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3959                 break;
3960         case IPV4_FLOW:
3961         case IPV6_FLOW:
3962                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3963                 break;
3964         default:
3965                 return -EINVAL;
3966         }
3967
3968         if (!tuple_sets)
3969                 return 0;
3970
3971         if (tuple_sets & HCLGE_D_PORT_BIT)
3972                 nfc->data |= RXH_L4_B_2_3;
3973         if (tuple_sets & HCLGE_S_PORT_BIT)
3974                 nfc->data |= RXH_L4_B_0_1;
3975         if (tuple_sets & HCLGE_D_IP_BIT)
3976                 nfc->data |= RXH_IP_DST;
3977         if (tuple_sets & HCLGE_S_IP_BIT)
3978                 nfc->data |= RXH_IP_SRC;
3979
3980         return 0;
3981 }
3982
3983 static int hclge_get_tc_size(struct hnae3_handle *handle)
3984 {
3985         struct hclge_vport *vport = hclge_get_vport(handle);
3986         struct hclge_dev *hdev = vport->back;
3987
3988         return hdev->rss_size_max;
3989 }
3990
3991 int hclge_rss_init_hw(struct hclge_dev *hdev)
3992 {
3993         struct hclge_vport *vport = hdev->vport;
3994         u8 *rss_indir = vport[0].rss_indirection_tbl;
3995         u16 rss_size = vport[0].alloc_rss_size;
3996         u8 *key = vport[0].rss_hash_key;
3997         u8 hfunc = vport[0].rss_algo;
3998         u16 tc_offset[HCLGE_MAX_TC_NUM];
3999         u16 tc_valid[HCLGE_MAX_TC_NUM];
4000         u16 tc_size[HCLGE_MAX_TC_NUM];
4001         u16 roundup_size;
4002         int i, ret;
4003
4004         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4005         if (ret)
4006                 return ret;
4007
4008         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4009         if (ret)
4010                 return ret;
4011
4012         ret = hclge_set_rss_input_tuple(hdev);
4013         if (ret)
4014                 return ret;
4015
4016         /* Each TC has the same queue size, and the tc_size set to hardware is
4017          * the log2 of the roundup power of two of rss_size; the actual queue
4018          * size is limited by the indirection table.
4019          */
4020         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4021                 dev_err(&hdev->pdev->dev,
4022                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4023                         rss_size);
4024                 return -EINVAL;
4025         }
4026
4027         roundup_size = roundup_pow_of_two(rss_size);
4028         roundup_size = ilog2(roundup_size);
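             /* e.g. rss_size = 24 gives roundup_pow_of_two(24) = 32 and
              * ilog2(32) = 5, so tc_size is programmed as 5
              */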
4029
4030         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4031                 tc_valid[i] = 0;
4032
4033                 if (!(hdev->hw_tc_map & BIT(i)))
4034                         continue;
4035
4036                 tc_valid[i] = 1;
4037                 tc_size[i] = roundup_size;
4038                 tc_offset[i] = rss_size * i;
4039         }
4040
4041         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4042 }
4043
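     /* default indirection table: spread entries round-robin over the
      * allocated RSS queues, e.g. with alloc_rss_size = 4 the table
      * repeats 0, 1, 2, 3, 0, 1, ...
      */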
4044 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4045 {
4046         struct hclge_vport *vport = hdev->vport;
4047         int i, j;
4048
4049         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4050                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4051                         vport[j].rss_indirection_tbl[i] =
4052                                 i % vport[j].alloc_rss_size;
4053         }
4054 }
4055
4056 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4057 {
4058         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4059         struct hclge_vport *vport = hdev->vport;
4060
4061         if (hdev->pdev->revision >= 0x21)
4062                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4063
4064         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4065                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4066                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4067                 vport[i].rss_tuple_sets.ipv4_udp_en =
4068                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4069                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4070                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4071                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4072                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4073                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4074                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4075                 vport[i].rss_tuple_sets.ipv6_udp_en =
4076                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4077                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4078                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4079                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4080                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4081
4082                 vport[i].rss_algo = rss_algo;
4083
4084                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4085                        HCLGE_RSS_KEY_SIZE);
4086         }
4087
4088         hclge_rss_indir_init_cfg(hdev);
4089 }
4090
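     /* Map (or unmap, when en is false) a chain of rings onto an interrupt
      * vector. Rings are packed HCLGE_VECTOR_ELEMENTS_PER_CMD at a time
      * into command descriptors; any partial batch is flushed after the
      * loop.
      */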
4091 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4092                                 int vector_id, bool en,
4093                                 struct hnae3_ring_chain_node *ring_chain)
4094 {
4095         struct hclge_dev *hdev = vport->back;
4096         struct hnae3_ring_chain_node *node;
4097         struct hclge_desc desc;
4098         struct hclge_ctrl_vector_chain_cmd *req
4099                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4100         enum hclge_cmd_status status;
4101         enum hclge_opcode_type op;
4102         u16 tqp_type_and_id;
4103         int i;
4104
4105         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4106         hclge_cmd_setup_basic_desc(&desc, op, false);
4107         req->int_vector_id = vector_id;
4108
4109         i = 0;
4110         for (node = ring_chain; node; node = node->next) {
4111                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4112                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4113                                 HCLGE_INT_TYPE_S,
4114                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4115                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4116                                 HCLGE_TQP_ID_S, node->tqp_index);
4117                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4118                                 HCLGE_INT_GL_IDX_S,
4119                                 hnae3_get_field(node->int_gl_idx,
4120                                                 HNAE3_RING_GL_IDX_M,
4121                                                 HNAE3_RING_GL_IDX_S));
4122                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4123                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4124                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4125                         req->vfid = vport->vport_id;
4126
4127                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4128                         if (status) {
4129                                 dev_err(&hdev->pdev->dev,
4130                                         "Map TQP fail, status is %d.\n",
4131                                         status);
4132                                 return -EIO;
4133                         }
4134                         i = 0;
4135
4136                         hclge_cmd_setup_basic_desc(&desc,
4137                                                    op,
4138                                                    false);
4139                         req->int_vector_id = vector_id;
4140                 }
4141         }
4142
4143         if (i > 0) {
4144                 req->int_cause_num = i;
4145                 req->vfid = vport->vport_id;
4146                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4147                 if (status) {
4148                         dev_err(&hdev->pdev->dev,
4149                                 "Map TQP fail, status is %d.\n", status);
4150                         return -EIO;
4151                 }
4152         }
4153
4154         return 0;
4155 }
4156
4157 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4158                                     struct hnae3_ring_chain_node *ring_chain)
4159 {
4160         struct hclge_vport *vport = hclge_get_vport(handle);
4161         struct hclge_dev *hdev = vport->back;
4162         int vector_id;
4163
4164         vector_id = hclge_get_vector_index(hdev, vector);
4165         if (vector_id < 0) {
4166                 dev_err(&hdev->pdev->dev,
4167                         "Get vector index fail. vector_id =%d\n", vector_id);
4168                 return vector_id;
4169         }
4170
4171         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4172 }
4173
4174 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4175                                        struct hnae3_ring_chain_node *ring_chain)
4176 {
4177         struct hclge_vport *vport = hclge_get_vport(handle);
4178         struct hclge_dev *hdev = vport->back;
4179         int vector_id, ret;
4180
4181         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4182                 return 0;
4183
4184         vector_id = hclge_get_vector_index(hdev, vector);
4185         if (vector_id < 0) {
4186                 dev_err(&handle->pdev->dev,
4187                         "Get vector index fail. ret =%d\n", vector_id);
4188                 return vector_id;
4189         }
4190
4191         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4192         if (ret)
4193                 dev_err(&handle->pdev->dev,
4194                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4195                         vector_id, ret);
4196
4197         return ret;
4198 }
4199
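/* Write the promiscuous mode configuration of one function (vf_id) to the
 * firmware: the enable bits from @param are combined with the TX/RX enable
 * bits into the flag field of the command.
 */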
4200 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4201                                struct hclge_promisc_param *param)
4202 {
4203         struct hclge_promisc_cfg_cmd *req;
4204         struct hclge_desc desc;
4205         int ret;
4206
4207         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4208
4209         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4210         req->vf_id = param->vf_id;
4211
4212         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4213          * pdev revision(0x20); newer revisions support them. Setting these
4214          * two fields does not cause an error when the driver sends the
4215          * command to the firmware on revision(0x20).
4216          */
4217         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4218                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4219
4220         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4221         if (ret)
4222                 dev_err(&hdev->pdev->dev,
4223                         "Set promisc mode fail, status is %d.\n", ret);
4224
4225         return ret;
4226 }
4227
4228 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4229                               bool en_mc, bool en_bc, int vport_id)
4230 {
4231         if (!param)
4232                 return;
4233
4234         memset(param, 0, sizeof(struct hclge_promisc_param));
4235         if (en_uc)
4236                 param->enable = HCLGE_PROMISC_EN_UC;
4237         if (en_mc)
4238                 param->enable |= HCLGE_PROMISC_EN_MC;
4239         if (en_bc)
4240                 param->enable |= HCLGE_PROMISC_EN_BC;
4241         param->vf_id = vport_id;
4242 }
4243
4244 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4245                                   bool en_mc_pmc)
4246 {
4247         struct hclge_vport *vport = hclge_get_vport(handle);
4248         struct hclge_dev *hdev = vport->back;
4249         struct hclge_promisc_param param;
4250         bool en_bc_pmc = true;
4251
4252         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4253          * is always bypassed. So broadcast promisc should be disabled until
4254          * the user enables promisc mode
4255          */
4256         if (handle->pdev->revision == 0x20)
4257                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4258
4259         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4260                                  vport->vport_id);
4261         return hclge_cmd_set_promisc_mode(hdev, &param);
4262 }
4263
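/* Query the flow director mode from the firmware; hclge_init_fd_config()
 * uses the result to select the TCAM key length.
 */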
4264 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4265 {
4266         struct hclge_get_fd_mode_cmd *req;
4267         struct hclge_desc desc;
4268         int ret;
4269
4270         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4271
4272         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4273
4274         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4275         if (ret) {
4276                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4277                 return ret;
4278         }
4279
4280         *fd_mode = req->mode;
4281
4282         return ret;
4283 }
4284
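/* Query how many flow director rule entries and counters the firmware has
 * allocated for stage 1 and stage 2.
 */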
4285 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4286                                    u32 *stage1_entry_num,
4287                                    u32 *stage2_entry_num,
4288                                    u16 *stage1_counter_num,
4289                                    u16 *stage2_counter_num)
4290 {
4291         struct hclge_get_fd_allocation_cmd *req;
4292         struct hclge_desc desc;
4293         int ret;
4294
4295         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4296
4297         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4298
4299         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4300         if (ret) {
4301                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4302                         ret);
4303                 return ret;
4304         }
4305
4306         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4307         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4308         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4309         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4310
4311         return ret;
4312 }
4313
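/* Download the key configuration of one flow director stage to the
 * firmware: key selection mode, IPv6 word enables, and the tuple/meta data
 * masks (the masks are the complement of the active bits).
 */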
4314 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4315 {
4316         struct hclge_set_fd_key_config_cmd *req;
4317         struct hclge_fd_key_cfg *stage;
4318         struct hclge_desc desc;
4319         int ret;
4320
4321         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4322
4323         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4324         stage = &hdev->fd_cfg.key_cfg[stage_num];
4325         req->stage = stage_num;
4326         req->key_select = stage->key_sel;
4327         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4328         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4329         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4330         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4331         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4332         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4333
4334         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4335         if (ret)
4336                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4337
4338         return ret;
4339 }
4340
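/* Initialize the flow director configuration: query the fd mode and rule
 * allocation from the firmware, then set up the stage 1 key layout (tuples
 * and meta data) according to the supported key length.
 */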
4341 static int hclge_init_fd_config(struct hclge_dev *hdev)
4342 {
4343 #define LOW_2_WORDS             0x03
4344         struct hclge_fd_key_cfg *key_cfg;
4345         int ret;
4346
4347         if (!hnae3_dev_fd_supported(hdev))
4348                 return 0;
4349
4350         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4351         if (ret)
4352                 return ret;
4353
4354         switch (hdev->fd_cfg.fd_mode) {
4355         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4356                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4357                 break;
4358         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4359                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4360                 break;
4361         default:
4362                 dev_err(&hdev->pdev->dev,
4363                         "Unsupported flow director mode %d\n",
4364                         hdev->fd_cfg.fd_mode);
4365                 return -EOPNOTSUPP;
4366         }
4367
4368         hdev->fd_cfg.proto_support =
4369                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4370                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4371         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4372         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4373         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4374         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4375         key_cfg->outer_sipv6_word_en = 0;
4376         key_cfg->outer_dipv6_word_en = 0;
4377
4378         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4379                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4380                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4381                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4382
4383         /* If using the max 400 bit key, we can support tuples for ether type */
4384         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4385                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4386                 key_cfg->tuple_active |=
4387                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4388         }
4389
4390         /* roce_type is used to filter roce frames
4391          * dst_vport is used to specify the rule
4392          */
4393         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4394
4395         ret = hclge_get_fd_allocation(hdev,
4396                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4397                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4398                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4399                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4400         if (ret)
4401                 return ret;
4402
4403         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4404 }
4405
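/* Write one TCAM entry (x or y key, selected by @sel_x) at index @loc. The
 * key is split across three command descriptors; a NULL @key is used when
 * only the entry valid bit needs to be updated, e.g. when deleting a rule.
 */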
4406 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4407                                 int loc, u8 *key, bool is_add)
4408 {
4409         struct hclge_fd_tcam_config_1_cmd *req1;
4410         struct hclge_fd_tcam_config_2_cmd *req2;
4411         struct hclge_fd_tcam_config_3_cmd *req3;
4412         struct hclge_desc desc[3];
4413         int ret;
4414
4415         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4416         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4417         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4418         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4419         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4420
4421         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4422         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4423         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4424
4425         req1->stage = stage;
4426         req1->xy_sel = sel_x ? 1 : 0;
4427         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4428         req1->index = cpu_to_le32(loc);
4429         req1->entry_vld = sel_x ? is_add : 0;
4430
4431         if (key) {
4432                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4433                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4434                        sizeof(req2->tcam_data));
4435                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4436                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4437         }
4438
4439         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4440         if (ret)
4441                 dev_err(&hdev->pdev->dev,
4442                         "config tcam key fail, ret=%d\n",
4443                         ret);
4444
4445         return ret;
4446 }
4447
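/* Configure the action data (ad) of one flow director rule: drop or
 * forward to a queue, optional counter, next stage and rule id write-back.
 * The fields are packed into a 64-bit value and written at index @loc.
 */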
4448 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4449                               struct hclge_fd_ad_data *action)
4450 {
4451         struct hclge_fd_ad_config_cmd *req;
4452         struct hclge_desc desc;
4453         u64 ad_data = 0;
4454         int ret;
4455
4456         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4457
4458         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4459         req->index = cpu_to_le32(loc);
4460         req->stage = stage;
4461
4462         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4463                       action->write_rule_id_to_bd);
4464         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4465                         action->rule_id);
4466         ad_data <<= 32;
4467         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4468         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4469                       action->forward_to_direct_queue);
4470         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4471                         action->queue_id);
4472         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4473         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4474                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4475         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4476         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4477                         action->counter_id);
4478
4479         req->ad_data = cpu_to_le64(ad_data);
4480         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4481         if (ret)
4482                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4483
4484         return ret;
4485 }
4486
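/* Convert one tuple of the rule into the tcam x/y key format with calc_x()
 * and calc_y(). Returns true if the tuple occupies space in the key, so the
 * caller advances the key pointers (even for an unused tuple, whose key
 * bytes stay zero); returns false if the tuple bit is not active.
 */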
4487 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4488                                    struct hclge_fd_rule *rule)
4489 {
4490         u16 tmp_x_s, tmp_y_s;
4491         u32 tmp_x_l, tmp_y_l;
4492         int i;
4493
4494         if (rule->unused_tuple & tuple_bit)
4495                 return true;
4496
4497         switch (tuple_bit) {
4498         case 0:
4499                 return false;
4500         case BIT(INNER_DST_MAC):
4501                 for (i = 0; i < ETH_ALEN; i++) {
4502                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4503                                rule->tuples_mask.dst_mac[i]);
4504                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4505                                rule->tuples_mask.dst_mac[i]);
4506                 }
4507
4508                 return true;
4509         case BIT(INNER_SRC_MAC):
4510                 for (i = 0; i < ETH_ALEN; i++) {
4511                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4512                                rule->tuples_mask.src_mac[i]);
4513                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4514                                rule->tuples_mask.src_mac[i]);
4515                 }
4516
4517                 return true;
4518         case BIT(INNER_VLAN_TAG_FST):
4519                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4520                        rule->tuples_mask.vlan_tag1);
4521                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4522                        rule->tuples_mask.vlan_tag1);
4523                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4524                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4525
4526                 return true;
4527         case BIT(INNER_ETH_TYPE):
4528                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4529                        rule->tuples_mask.ether_proto);
4530                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4531                        rule->tuples_mask.ether_proto);
4532                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4533                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4534
4535                 return true;
4536         case BIT(INNER_IP_TOS):
4537                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4538                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4539
4540                 return true;
4541         case BIT(INNER_IP_PROTO):
4542                 calc_x(*key_x, rule->tuples.ip_proto,
4543                        rule->tuples_mask.ip_proto);
4544                 calc_y(*key_y, rule->tuples.ip_proto,
4545                        rule->tuples_mask.ip_proto);
4546
4547                 return true;
4548         case BIT(INNER_SRC_IP):
4549                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4550                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4551                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4552                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4553                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4554                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4555
4556                 return true;
4557         case BIT(INNER_DST_IP):
4558                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4559                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4560                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4561                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4562                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4563                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4564
4565                 return true;
4566         case BIT(INNER_SRC_PORT):
4567                 calc_x(tmp_x_s, rule->tuples.src_port,
4568                        rule->tuples_mask.src_port);
4569                 calc_y(tmp_y_s, rule->tuples.src_port,
4570                        rule->tuples_mask.src_port);
4571                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4572                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4573
4574                 return true;
4575         case BIT(INNER_DST_PORT):
4576                 calc_x(tmp_x_s, rule->tuples.dst_port,
4577                        rule->tuples_mask.dst_port);
4578                 calc_y(tmp_y_s, rule->tuples.dst_port,
4579                        rule->tuples_mask.dst_port);
4580                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4581                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4582
4583                 return true;
4584         default:
4585                 return false;
4586         }
4587 }
4588
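/* Encode a port for the meta data key: either a host port identified by
 * pf id and vf id, or a network port identified by its port id.
 */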
4589 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4590                                  u8 vf_id, u8 network_port_id)
4591 {
4592         u32 port_number = 0;
4593
4594         if (port_type == HOST_PORT) {
4595                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4596                                 pf_id);
4597                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4598                                 vf_id);
4599                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4600         } else {
4601                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4602                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4603                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4604         }
4605
4606         return port_number;
4607 }
4608
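/* Build the meta data part of the key: the packet type and the destination
 * vport (derived from the rule's vf_id) are accumulated from bit 0 and then
 * shifted up so that they occupy the most significant bits of the 32-bit
 * meta data word before being converted to x/y format.
 */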
4609 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4610                                        __le32 *key_x, __le32 *key_y,
4611                                        struct hclge_fd_rule *rule)
4612 {
4613         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4614         u8 cur_pos = 0, tuple_size, shift_bits;
4615         int i;
4616
4617         for (i = 0; i < MAX_META_DATA; i++) {
4618                 tuple_size = meta_data_key_info[i].key_length;
4619                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4620
4621                 switch (tuple_bit) {
4622                 case BIT(ROCE_TYPE):
4623                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4624                         cur_pos += tuple_size;
4625                         break;
4626                 case BIT(DST_VPORT):
4627                         port_number = hclge_get_port_number(HOST_PORT, 0,
4628                                                             rule->vf_id, 0);
4629                         hnae3_set_field(meta_data,
4630                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4631                                         cur_pos, port_number);
4632                         cur_pos += tuple_size;
4633                         break;
4634                 default:
4635                         break;
4636                 }
4637         }
4638
4639         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4640         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4641         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4642
4643         *key_x = cpu_to_le32(tmp_x << shift_bits);
4644         *key_y = cpu_to_le32(tmp_y << shift_bits);
4645 }
4646
4647 /* A complete key is made up of a meta data key and a tuple key.
4648  * The meta data key is stored at the MSB region, the tuple key is stored at
4649  * the LSB region, and unused bits are filled with 0.
4650  */
4651 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4652                             struct hclge_fd_rule *rule)
4653 {
4654         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4655         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4656         u8 *cur_key_x, *cur_key_y;
4657         int i, ret, tuple_size;
4658         u8 meta_data_region;
4659
4660         memset(key_x, 0, sizeof(key_x));
4661         memset(key_y, 0, sizeof(key_y));
4662         cur_key_x = key_x;
4663         cur_key_y = key_y;
4664
4665         for (i = 0; i < MAX_TUPLE; i++) {
4666                 bool tuple_valid;
4667                 u32 check_tuple;
4668
4669                 tuple_size = tuple_key_info[i].key_length / 8;
4670                 check_tuple = key_cfg->tuple_active & BIT(i);
4671
4672                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4673                                                      cur_key_y, rule);
4674                 if (tuple_valid) {
4675                         cur_key_x += tuple_size;
4676                         cur_key_y += tuple_size;
4677                 }
4678         }
4679
4680         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4681                         MAX_META_DATA_LENGTH / 8;
4682
4683         hclge_fd_convert_meta_data(key_cfg,
4684                                    (__le32 *)(key_x + meta_data_region),
4685                                    (__le32 *)(key_y + meta_data_region),
4686                                    rule);
4687
4688         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4689                                    true);
4690         if (ret) {
4691                 dev_err(&hdev->pdev->dev,
4692                         "fd key_y config fail, loc=%d, ret=%d\n",
4693                         rule->location, ret);
4694                 return ret;
4695         }
4696
4697         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4698                                    true);
4699         if (ret)
4700                 dev_err(&hdev->pdev->dev,
4701                         "fd key_x config fail, loc=%d, ret=%d\n",
4702                         rule->location, ret);
4703         return ret;
4704 }
4705
4706 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4707                                struct hclge_fd_rule *rule)
4708 {
4709         struct hclge_fd_ad_data ad_data;
4710
4711         ad_data.ad_id = rule->location;
4712
4713         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4714                 ad_data.drop_packet = true;
4715                 ad_data.forward_to_direct_queue = false;
4716                 ad_data.queue_id = 0;
4717         } else {
4718                 ad_data.drop_packet = false;
4719                 ad_data.forward_to_direct_queue = true;
4720                 ad_data.queue_id = rule->queue_id;
4721         }
4722
4723         ad_data.use_counter = false;
4724         ad_data.counter_id = 0;
4725
4726         ad_data.use_next_stage = false;
4727         ad_data.next_input_key = 0;
4728
4729         ad_data.write_rule_id_to_bd = true;
4730         ad_data.rule_id = rule->location;
4731
4732         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4733 }
4734
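/* Validate an ethtool flow spec before it is turned into a rule: check the
 * location and flow type against the fd capabilities, and collect the
 * tuples that are not used by the spec into @unused.
 */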
4735 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4736                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4737 {
4738         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4739         struct ethtool_usrip4_spec *usr_ip4_spec;
4740         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4741         struct ethtool_usrip6_spec *usr_ip6_spec;
4742         struct ethhdr *ether_spec;
4743
4744         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4745                 return -EINVAL;
4746
4747         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4748                 return -EOPNOTSUPP;
4749
4750         if ((fs->flow_type & FLOW_EXT) &&
4751             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4752                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4753                 return -EOPNOTSUPP;
4754         }
4755
4756         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4757         case SCTP_V4_FLOW:
4758         case TCP_V4_FLOW:
4759         case UDP_V4_FLOW:
4760                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4761                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4762
4763                 if (!tcp_ip4_spec->ip4src)
4764                         *unused |= BIT(INNER_SRC_IP);
4765
4766                 if (!tcp_ip4_spec->ip4dst)
4767                         *unused |= BIT(INNER_DST_IP);
4768
4769                 if (!tcp_ip4_spec->psrc)
4770                         *unused |= BIT(INNER_SRC_PORT);
4771
4772                 if (!tcp_ip4_spec->pdst)
4773                         *unused |= BIT(INNER_DST_PORT);
4774
4775                 if (!tcp_ip4_spec->tos)
4776                         *unused |= BIT(INNER_IP_TOS);
4777
4778                 break;
4779         case IP_USER_FLOW:
4780                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4781                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4782                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4783
4784                 if (!usr_ip4_spec->ip4src)
4785                         *unused |= BIT(INNER_SRC_IP);
4786
4787                 if (!usr_ip4_spec->ip4dst)
4788                         *unused |= BIT(INNER_DST_IP);
4789
4790                 if (!usr_ip4_spec->tos)
4791                         *unused |= BIT(INNER_IP_TOS);
4792
4793                 if (!usr_ip4_spec->proto)
4794                         *unused |= BIT(INNER_IP_PROTO);
4795
4796                 if (usr_ip4_spec->l4_4_bytes)
4797                         return -EOPNOTSUPP;
4798
4799                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4800                         return -EOPNOTSUPP;
4801
4802                 break;
4803         case SCTP_V6_FLOW:
4804         case TCP_V6_FLOW:
4805         case UDP_V6_FLOW:
4806                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4807                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4808                         BIT(INNER_IP_TOS);
4809
4810                 /* check whether src/dst ip address is used */
4811                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4812                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4813                         *unused |= BIT(INNER_SRC_IP);
4814
4815                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4816                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4817                         *unused |= BIT(INNER_DST_IP);
4818
4819                 if (!tcp_ip6_spec->psrc)
4820                         *unused |= BIT(INNER_SRC_PORT);
4821
4822                 if (!tcp_ip6_spec->pdst)
4823                         *unused |= BIT(INNER_DST_PORT);
4824
4825                 if (tcp_ip6_spec->tclass)
4826                         return -EOPNOTSUPP;
4827
4828                 break;
4829         case IPV6_USER_FLOW:
4830                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4831                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4832                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4833                         BIT(INNER_DST_PORT);
4834
4835                 /* check whether src/dst ip address is used */
4836                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4837                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4838                         *unused |= BIT(INNER_SRC_IP);
4839
4840                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4841                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4842                         *unused |= BIT(INNER_DST_IP);
4843
4844                 if (!usr_ip6_spec->l4_proto)
4845                         *unused |= BIT(INNER_IP_PROTO);
4846
4847                 if (usr_ip6_spec->tclass)
4848                         return -EOPNOTSUPP;
4849
4850                 if (usr_ip6_spec->l4_4_bytes)
4851                         return -EOPNOTSUPP;
4852
4853                 break;
4854         case ETHER_FLOW:
4855                 ether_spec = &fs->h_u.ether_spec;
4856                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4857                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4858                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4859
4860                 if (is_zero_ether_addr(ether_spec->h_source))
4861                         *unused |= BIT(INNER_SRC_MAC);
4862
4863                 if (is_zero_ether_addr(ether_spec->h_dest))
4864                         *unused |= BIT(INNER_DST_MAC);
4865
4866                 if (!ether_spec->h_proto)
4867                         *unused |= BIT(INNER_ETH_TYPE);
4868
4869                 break;
4870         default:
4871                 return -EOPNOTSUPP;
4872         }
4873
4874         if (fs->flow_type & FLOW_EXT) {
4875                 if (fs->h_ext.vlan_etype)
4876                         return -EOPNOTSUPP;
4877                 if (!fs->h_ext.vlan_tci)
4878                         *unused |= BIT(INNER_VLAN_TAG_FST);
4879
4880                 if (fs->m_ext.vlan_tci) {
4881                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4882                                 return -EINVAL;
4883                 }
4884         } else {
4885                 *unused |= BIT(INNER_VLAN_TAG_FST);
4886         }
4887
4888         if (fs->flow_type & FLOW_MAC_EXT) {
4889                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4890                         return -EOPNOTSUPP;
4891
4892                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4893                         *unused |= BIT(INNER_DST_MAC);
4894                 else
4895                         *unused &= ~(BIT(INNER_DST_MAC));
4896         }
4897
4898         return 0;
4899 }
4900
4901 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4902 {
4903         struct hclge_fd_rule *rule = NULL;
4904         struct hlist_node *node2;
4905
4906         spin_lock_bh(&hdev->fd_rule_lock);
4907         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4908                 if (rule->location >= location)
4909                         break;
4910         }
4911
4912         spin_unlock_bh(&hdev->fd_rule_lock);
4913
4914         return rule && rule->location == location;
4915 }
4916
4917 /* the caller must hold fd_rule_lock when calling this function */
4918 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4919                                      struct hclge_fd_rule *new_rule,
4920                                      u16 location,
4921                                      bool is_add)
4922 {
4923         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4924         struct hlist_node *node2;
4925
4926         if (is_add && !new_rule)
4927                 return -EINVAL;
4928
4929         hlist_for_each_entry_safe(rule, node2,
4930                                   &hdev->fd_rule_list, rule_node) {
4931                 if (rule->location >= location)
4932                         break;
4933                 parent = rule;
4934         }
4935
4936         if (rule && rule->location == location) {
4937                 hlist_del(&rule->rule_node);
4938                 kfree(rule);
4939                 hdev->hclge_fd_rule_num--;
4940
4941                 if (!is_add) {
4942                         if (!hdev->hclge_fd_rule_num)
4943                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4944                         clear_bit(location, hdev->fd_bmap);
4945
4946                         return 0;
4947                 }
4948         } else if (!is_add) {
4949                 dev_err(&hdev->pdev->dev,
4950                         "delete fail, rule %d does not exist\n",
4951                         location);
4952                 return -EINVAL;
4953         }
4954
4955         INIT_HLIST_NODE(&new_rule->rule_node);
4956
4957         if (parent)
4958                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4959         else
4960                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4961
4962         set_bit(location, hdev->fd_bmap);
4963         hdev->hclge_fd_rule_num++;
4964         hdev->fd_active_type = new_rule->rule_type;
4965
4966         return 0;
4967 }
4968
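/* Translate an ethtool flow spec into the rule's tuples and tuples_mask
 * fields, converting from the big endian ethtool layout to host byte order.
 */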
4969 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4970                               struct ethtool_rx_flow_spec *fs,
4971                               struct hclge_fd_rule *rule)
4972 {
4973         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4974
4975         switch (flow_type) {
4976         case SCTP_V4_FLOW:
4977         case TCP_V4_FLOW:
4978         case UDP_V4_FLOW:
4979                 rule->tuples.src_ip[IPV4_INDEX] =
4980                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4981                 rule->tuples_mask.src_ip[IPV4_INDEX] =
4982                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4983
4984                 rule->tuples.dst_ip[IPV4_INDEX] =
4985                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4986                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
4987                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4988
4989                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4990                 rule->tuples_mask.src_port =
4991                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4992
4993                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4994                 rule->tuples_mask.dst_port =
4995                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4996
4997                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4998                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4999
5000                 rule->tuples.ether_proto = ETH_P_IP;
5001                 rule->tuples_mask.ether_proto = 0xFFFF;
5002
5003                 break;
5004         case IP_USER_FLOW:
5005                 rule->tuples.src_ip[IPV4_INDEX] =
5006                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5007                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5008                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5009
5010                 rule->tuples.dst_ip[IPV4_INDEX] =
5011                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5012                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5013                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5014
5015                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5016                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5017
5018                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5019                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5020
5021                 rule->tuples.ether_proto = ETH_P_IP;
5022                 rule->tuples_mask.ether_proto = 0xFFFF;
5023
5024                 break;
5025         case SCTP_V6_FLOW:
5026         case TCP_V6_FLOW:
5027         case UDP_V6_FLOW:
5028                 be32_to_cpu_array(rule->tuples.src_ip,
5029                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5030                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5031                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5032
5033                 be32_to_cpu_array(rule->tuples.dst_ip,
5034                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5035                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5036                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5037
5038                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5039                 rule->tuples_mask.src_port =
5040                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5041
5042                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5043                 rule->tuples_mask.dst_port =
5044                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5045
5046                 rule->tuples.ether_proto = ETH_P_IPV6;
5047                 rule->tuples_mask.ether_proto = 0xFFFF;
5048
5049                 break;
5050         case IPV6_USER_FLOW:
5051                 be32_to_cpu_array(rule->tuples.src_ip,
5052                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5053                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5054                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5055
5056                 be32_to_cpu_array(rule->tuples.dst_ip,
5057                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5058                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5059                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5060
5061                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5062                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5063
5064                 rule->tuples.ether_proto = ETH_P_IPV6;
5065                 rule->tuples_mask.ether_proto = 0xFFFF;
5066
5067                 break;
5068         case ETHER_FLOW:
5069                 ether_addr_copy(rule->tuples.src_mac,
5070                                 fs->h_u.ether_spec.h_source);
5071                 ether_addr_copy(rule->tuples_mask.src_mac,
5072                                 fs->m_u.ether_spec.h_source);
5073
5074                 ether_addr_copy(rule->tuples.dst_mac,
5075                                 fs->h_u.ether_spec.h_dest);
5076                 ether_addr_copy(rule->tuples_mask.dst_mac,
5077                                 fs->m_u.ether_spec.h_dest);
5078
5079                 rule->tuples.ether_proto =
5080                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5081                 rule->tuples_mask.ether_proto =
5082                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5083
5084                 break;
5085         default:
5086                 return -EOPNOTSUPP;
5087         }
5088
5089         switch (flow_type) {
5090         case SCTP_V4_FLOW:
5091         case SCTP_V6_FLOW:
5092                 rule->tuples.ip_proto = IPPROTO_SCTP;
5093                 rule->tuples_mask.ip_proto = 0xFF;
5094                 break;
5095         case TCP_V4_FLOW:
5096         case TCP_V6_FLOW:
5097                 rule->tuples.ip_proto = IPPROTO_TCP;
5098                 rule->tuples_mask.ip_proto = 0xFF;
5099                 break;
5100         case UDP_V4_FLOW:
5101         case UDP_V6_FLOW:
5102                 rule->tuples.ip_proto = IPPROTO_UDP;
5103                 rule->tuples_mask.ip_proto = 0xFF;
5104                 break;
5105         default:
5106                 break;
5107         }
5108
5109         if (fs->flow_type & FLOW_EXT) {
5110                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5111                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5112         }
5113
5114         if (fs->flow_type & FLOW_MAC_EXT) {
5115                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5116                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5117         }
5118
5119         return 0;
5120 }
5121
5122 /* the caller must hold fd_rule_lock when calling this function */
5123 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5124                                 struct hclge_fd_rule *rule)
5125 {
5126         int ret;
5127
5128         if (!rule) {
5129                 dev_err(&hdev->pdev->dev,
5130                         "The flow director rule is NULL\n");
5131                 return -EINVAL;
5132         }
5133
5134         /* it will never fail here, so no need to check the return value */
5135         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5136
5137         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5138         if (ret)
5139                 goto clear_rule;
5140
5141         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5142         if (ret)
5143                 goto clear_rule;
5144
5145         return 0;
5146
5147 clear_rule:
5148         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5149         return ret;
5150 }
5151
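/* Entry for adding a flow director rule from ethtool: validate the flow
 * spec, work out the destination vport and queue, build a hclge_fd_rule
 * from it and write the rule to hardware under fd_rule_lock.
 */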
5152 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5153                               struct ethtool_rxnfc *cmd)
5154 {
5155         struct hclge_vport *vport = hclge_get_vport(handle);
5156         struct hclge_dev *hdev = vport->back;
5157         u16 dst_vport_id = 0, q_index = 0;
5158         struct ethtool_rx_flow_spec *fs;
5159         struct hclge_fd_rule *rule;
5160         u32 unused = 0;
5161         u8 action;
5162         int ret;
5163
5164         if (!hnae3_dev_fd_supported(hdev))
5165                 return -EOPNOTSUPP;
5166
5167         if (!hdev->fd_en) {
5168                 dev_warn(&hdev->pdev->dev,
5169                          "Please enable flow director first\n");
5170                 return -EOPNOTSUPP;
5171         }
5172
5173         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5174
5175         ret = hclge_fd_check_spec(hdev, fs, &unused);
5176         if (ret) {
5177                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5178                 return ret;
5179         }
5180
5181         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5182                 action = HCLGE_FD_ACTION_DROP_PACKET;
5183         } else {
5184                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5185                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5186                 u16 tqps;
5187
5188                 if (vf > hdev->num_req_vfs) {
5189                         dev_err(&hdev->pdev->dev,
5190                                 "Error: vf id (%d) > max vf num (%d)\n",
5191                                 vf, hdev->num_req_vfs);
5192                         return -EINVAL;
5193                 }
5194
5195                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5196                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5197
5198                 if (ring >= tqps) {
5199                         dev_err(&hdev->pdev->dev,
5200                                 "Error: queue id (%d) > max tqp num (%d)\n",
5201                                 ring, tqps - 1);
5202                         return -EINVAL;
5203                 }
5204
5205                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5206                 q_index = ring;
5207         }
5208
5209         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5210         if (!rule)
5211                 return -ENOMEM;
5212
5213         ret = hclge_fd_get_tuple(hdev, fs, rule);
5214         if (ret) {
5215                 kfree(rule);
5216                 return ret;
5217         }
5218
5219         rule->flow_type = fs->flow_type;
5220
5221         rule->location = fs->location;
5222         rule->unused_tuple = unused;
5223         rule->vf_id = dst_vport_id;
5224         rule->queue_id = q_index;
5225         rule->action = action;
5226         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5227
5228         /* To avoid rule conflicts, all arfs rules need to be cleared when
5229          * the user configures a rule via ethtool
5230          */
5231         hclge_clear_arfs_rules(handle);
5232
5233         spin_lock_bh(&hdev->fd_rule_lock);
5234         ret = hclge_fd_config_rule(hdev, rule);
5235
5236         spin_unlock_bh(&hdev->fd_rule_lock);
5237
5238         return ret;
5239 }
5240
5241 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5242                               struct ethtool_rxnfc *cmd)
5243 {
5244         struct hclge_vport *vport = hclge_get_vport(handle);
5245         struct hclge_dev *hdev = vport->back;
5246         struct ethtool_rx_flow_spec *fs;
5247         int ret;
5248
5249         if (!hnae3_dev_fd_supported(hdev))
5250                 return -EOPNOTSUPP;
5251
5252         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5253
5254         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5255                 return -EINVAL;
5256
5257         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5258                 dev_err(&hdev->pdev->dev,
5259                         "Delete fail, rule %d does not exist\n", fs->location);
5260                 return -ENOENT;
5261         }
5262
5263         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5264                                    NULL, false);
5265         if (ret)
5266                 return ret;
5267
5268         spin_lock_bh(&hdev->fd_rule_lock);
5269         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5270
5271         spin_unlock_bh(&hdev->fd_rule_lock);
5272
5273         return ret;
5274 }
5275
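/* Remove every rule marked in fd_bmap from hardware. When @clear_list is
 * true the software rule list and the bitmap are cleared as well; otherwise
 * they are kept, e.g. so hclge_restore_fd_entries() can rewrite the rules.
 */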
5276 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5277                                      bool clear_list)
5278 {
5279         struct hclge_vport *vport = hclge_get_vport(handle);
5280         struct hclge_dev *hdev = vport->back;
5281         struct hclge_fd_rule *rule;
5282         struct hlist_node *node;
5283         u16 location;
5284
5285         if (!hnae3_dev_fd_supported(hdev))
5286                 return;
5287
5288         spin_lock_bh(&hdev->fd_rule_lock);
5289         for_each_set_bit(location, hdev->fd_bmap,
5290                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5291                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5292                                      NULL, false);
5293
5294         if (clear_list) {
5295                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5296                                           rule_node) {
5297                         hlist_del(&rule->rule_node);
5298                         kfree(rule);
5299                 }
5300                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5301                 hdev->hclge_fd_rule_num = 0;
5302                 bitmap_zero(hdev->fd_bmap,
5303                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5304         }
5305
5306         spin_unlock_bh(&hdev->fd_rule_lock);
5307 }
5308
5309 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5310 {
5311         struct hclge_vport *vport = hclge_get_vport(handle);
5312         struct hclge_dev *hdev = vport->back;
5313         struct hclge_fd_rule *rule;
5314         struct hlist_node *node;
5315         int ret;
5316
5317         /* Return ok here, because reset error handling will check this
5318          * return value. If error is returned here, the reset process will
5319          * fail.
5320          */
5321         if (!hnae3_dev_fd_supported(hdev))
5322                 return 0;
5323
5324         /* if fd is disabled, the rules should not be restored during reset */
5325         if (!hdev->fd_en)
5326                 return 0;
5327
5328         spin_lock_bh(&hdev->fd_rule_lock);
5329         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5330                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5331                 if (!ret)
5332                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5333
5334                 if (ret) {
5335                         dev_warn(&hdev->pdev->dev,
5336                                  "Restore rule %d failed, remove it\n",
5337                                  rule->location);
5338                         clear_bit(rule->location, hdev->fd_bmap);
5339                         hlist_del(&rule->rule_node);
5340                         kfree(rule);
5341                         hdev->hclge_fd_rule_num--;
5342                 }
5343         }
5344
5345         if (hdev->hclge_fd_rule_num)
5346                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5347
5348         spin_unlock_bh(&hdev->fd_rule_lock);
5349
5350         return 0;
5351 }
5352
5353 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5354                                  struct ethtool_rxnfc *cmd)
5355 {
5356         struct hclge_vport *vport = hclge_get_vport(handle);
5357         struct hclge_dev *hdev = vport->back;
5358
5359         if (!hnae3_dev_fd_supported(hdev))
5360                 return -EOPNOTSUPP;
5361
5362         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5363         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5364
5365         return 0;
5366 }
5367
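/* Fill an ethtool flow spec from the rule stored at fs->location so the
 * rule can be reported back to user space.
 */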
5368 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5369                                   struct ethtool_rxnfc *cmd)
5370 {
5371         struct hclge_vport *vport = hclge_get_vport(handle);
5372         struct hclge_fd_rule *rule = NULL;
5373         struct hclge_dev *hdev = vport->back;
5374         struct ethtool_rx_flow_spec *fs;
5375         struct hlist_node *node2;
5376
5377         if (!hnae3_dev_fd_supported(hdev))
5378                 return -EOPNOTSUPP;
5379
5380         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5381
5382         spin_lock_bh(&hdev->fd_rule_lock);
5383
5384         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5385                 if (rule->location >= fs->location)
5386                         break;
5387         }
5388
5389         if (!rule || fs->location != rule->location) {
5390                 spin_unlock_bh(&hdev->fd_rule_lock);
5391
5392                 return -ENOENT;
5393         }
5394
5395         fs->flow_type = rule->flow_type;
5396         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5397         case SCTP_V4_FLOW:
5398         case TCP_V4_FLOW:
5399         case UDP_V4_FLOW:
5400                 fs->h_u.tcp_ip4_spec.ip4src =
5401                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5402                 fs->m_u.tcp_ip4_spec.ip4src =
5403                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5404                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5405
5406                 fs->h_u.tcp_ip4_spec.ip4dst =
5407                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5408                 fs->m_u.tcp_ip4_spec.ip4dst =
5409                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5410                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5411
5412                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5413                 fs->m_u.tcp_ip4_spec.psrc =
5414                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5415                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5416
5417                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5418                 fs->m_u.tcp_ip4_spec.pdst =
5419                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5420                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5421
5422                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5423                 fs->m_u.tcp_ip4_spec.tos =
5424                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5425                                 0 : rule->tuples_mask.ip_tos;
5426
5427                 break;
5428         case IP_USER_FLOW:
5429                 fs->h_u.usr_ip4_spec.ip4src =
5430                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5431                 fs->m_u.tcp_ip4_spec.ip4src =
5432                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5433                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5434
5435                 fs->h_u.usr_ip4_spec.ip4dst =
5436                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5437                 fs->m_u.usr_ip4_spec.ip4dst =
5438                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5439                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5440
5441                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5442                 fs->m_u.usr_ip4_spec.tos =
5443                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5444                                 0 : rule->tuples_mask.ip_tos;
5445
5446                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5447                 fs->m_u.usr_ip4_spec.proto =
5448                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5449                                 0 : rule->tuples_mask.ip_proto;
5450
5451                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5452
5453                 break;
5454         case SCTP_V6_FLOW:
5455         case TCP_V6_FLOW:
5456         case UDP_V6_FLOW:
5457                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5458                                   rule->tuples.src_ip, IPV6_SIZE);
5459                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5460                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5461                                sizeof(int) * IPV6_SIZE);
5462                 else
5463                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5464                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5465
5466                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5467                                   rule->tuples.dst_ip, IPV6_SIZE);
5468                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5469                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5470                                sizeof(int) * IPV6_SIZE);
5471                 else
5472                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5473                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5474
5475                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5476                 fs->m_u.tcp_ip6_spec.psrc =
5477                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5478                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5479
5480                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5481                 fs->m_u.tcp_ip6_spec.pdst =
5482                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5483                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5484
5485                 break;
5486         case IPV6_USER_FLOW:
5487                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5488                                   rule->tuples.src_ip, IPV6_SIZE);
5489                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5490                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5491                                sizeof(int) * IPV6_SIZE);
5492                 else
5493                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5494                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5495
5496                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5497                                   rule->tuples.dst_ip, IPV6_SIZE);
5498                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5499                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5500                                sizeof(int) * IPV6_SIZE);
5501                 else
5502                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5503                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5504
5505                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5506                 fs->m_u.usr_ip6_spec.l4_proto =
5507                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5508                                 0 : rule->tuples_mask.ip_proto;
5509
5510                 break;
5511         case ETHER_FLOW:
5512                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5513                                 rule->tuples.src_mac);
5514                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5515                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5516                 else
5517                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5518                                         rule->tuples_mask.src_mac);
5519
5520                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5521                                 rule->tuples.dst_mac);
5522                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5523                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5524                 else
5525                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5526                                         rule->tuples_mask.dst_mac);
5527
5528                 fs->h_u.ether_spec.h_proto =
5529                                 cpu_to_be16(rule->tuples.ether_proto);
5530                 fs->m_u.ether_spec.h_proto =
5531                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5532                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5533
5534                 break;
5535         default:
5536                 spin_unlock_bh(&hdev->fd_rule_lock);
5537                 return -EOPNOTSUPP;
5538         }
5539
5540         if (fs->flow_type & FLOW_EXT) {
5541                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5542                 fs->m_ext.vlan_tci =
5543                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5544                                 cpu_to_be16(VLAN_VID_MASK) :
5545                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5546         }
5547
5548         if (fs->flow_type & FLOW_MAC_EXT) {
5549                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5550                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5551                         eth_zero_addr(fs->m_ext.h_dest);
5552                 else
5553                         ether_addr_copy(fs->m_ext.h_dest,
5554                                         rule->tuples_mask.dst_mac);
5555         }
5556
5557         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5558                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5559         } else {
5560                 u64 vf_id;
5561
5562                 fs->ring_cookie = rule->queue_id;
5563                 vf_id = rule->vf_id;
5564                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5565                 fs->ring_cookie |= vf_id;
5566         }
5567
5568         spin_unlock_bh(&hdev->fd_rule_lock);
5569
5570         return 0;
5571 }
5572
5573 static int hclge_get_all_rules(struct hnae3_handle *handle,
5574                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5575 {
5576         struct hclge_vport *vport = hclge_get_vport(handle);
5577         struct hclge_dev *hdev = vport->back;
5578         struct hclge_fd_rule *rule;
5579         struct hlist_node *node2;
5580         int cnt = 0;
5581
5582         if (!hnae3_dev_fd_supported(hdev))
5583                 return -EOPNOTSUPP;
5584
5585         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5586
5587         spin_lock_bh(&hdev->fd_rule_lock);
5588         hlist_for_each_entry_safe(rule, node2,
5589                                   &hdev->fd_rule_list, rule_node) {
5590                 if (cnt == cmd->rule_cnt) {
5591                         spin_unlock_bh(&hdev->fd_rule_lock);
5592                         return -EMSGSIZE;
5593                 }
5594
5595                 rule_locs[cnt] = rule->location;
5596                 cnt++;
5597         }
5598
5599         spin_unlock_bh(&hdev->fd_rule_lock);
5600
5601         cmd->rule_cnt = cnt;
5602
5603         return 0;
5604 }
5605
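/* The src_ip/dst_ip tuple arrays hold IPV6_SIZE (4) 32-bit words. For IPv4
 * flows only the last word (index 3) carries the address; the caller zeroes
 * the tuples beforehand, so the remaining words stay 0.
 */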
5606 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5607                                      struct hclge_fd_rule_tuples *tuples)
5608 {
5609         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5610         tuples->ip_proto = fkeys->basic.ip_proto;
5611         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5612
5613         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5614                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5615                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5616         } else {
5617                 memcpy(tuples->src_ip,
5618                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5619                        sizeof(tuples->src_ip));
5620                 memcpy(tuples->dst_ip,
5621                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5622                        sizeof(tuples->dst_ip));
5623         }
5624 }
5625
5626 /* traverse all rules, check whether an existing rule has the same tuples */
5627 static struct hclge_fd_rule *
5628 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5629                           const struct hclge_fd_rule_tuples *tuples)
5630 {
5631         struct hclge_fd_rule *rule = NULL;
5632         struct hlist_node *node;
5633
5634         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5635                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5636                         return rule;
5637         }
5638
5639         return NULL;
5640 }
5641
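/* Build an aRFS rule from the extracted flow tuples: MAC, VLAN, TOS and
 * source port are marked unused, every remaining tuple is matched exactly
 * (tuples_mask is set to all 0xFF), and the rule is tagged
 * HCLGE_FD_ARFS_ACTIVE so it can be told apart from ethtool-configured rules.
 */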
5642 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5643                                      struct hclge_fd_rule *rule)
5644 {
5645         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5646                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5647                              BIT(INNER_SRC_PORT);
5648         rule->action = 0;
5649         rule->vf_id = 0;
5650         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5651         if (tuples->ether_proto == ETH_P_IP) {
5652                 if (tuples->ip_proto == IPPROTO_TCP)
5653                         rule->flow_type = TCP_V4_FLOW;
5654                 else
5655                         rule->flow_type = UDP_V4_FLOW;
5656         } else {
5657                 if (tuples->ip_proto == IPPROTO_TCP)
5658                         rule->flow_type = TCP_V6_FLOW;
5659                 else
5660                         rule->flow_type = UDP_V6_FLOW;
5661         }
5662         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5663         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5664 }
5665
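/* aRFS entry point: returns a negative errno on failure, otherwise the rule
 * location in stage 1, which is later passed back to
 * rps_may_expire_flow() as the filter id when checking for expiry.
 */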
5666 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5667                                       u16 flow_id, struct flow_keys *fkeys)
5668 {
5669         struct hclge_vport *vport = hclge_get_vport(handle);
5670         struct hclge_fd_rule_tuples new_tuples;
5671         struct hclge_dev *hdev = vport->back;
5672         struct hclge_fd_rule *rule;
5673         u16 tmp_queue_id;
5674         u16 bit_id;
5675         int ret;
5676
5677         if (!hnae3_dev_fd_supported(hdev))
5678                 return -EOPNOTSUPP;
5679
5680         memset(&new_tuples, 0, sizeof(new_tuples));
5681         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5682
5683         spin_lock_bh(&hdev->fd_rule_lock);
5684
5685         /* when there is already an fd rule added by user,
5686          * arfs should not work
5687          */
5688         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5689                 spin_unlock_bh(&hdev->fd_rule_lock);
5690
5691                 return -EOPNOTSUPP;
5692         }
5693
5694         /* check whether a flow director filter already exists for this flow;
5695          * if not, create a new filter for it;
5696          * if a filter exists with a different queue id, modify the filter;
5697          * if a filter exists with the same queue id, do nothing
5698          */
5699         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5700         if (!rule) {
5701                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5702                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5703                         spin_unlock_bh(&hdev->fd_rule_lock);
5704
5705                         return -ENOSPC;
5706                 }
5707
5708                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5709                 if (!rule) {
5710                         spin_unlock_bh(&hdev->fd_rule_lock);
5711
5712                         return -ENOMEM;
5713                 }
5714
5715                 set_bit(bit_id, hdev->fd_bmap);
5716                 rule->location = bit_id;
5717                 rule->flow_id = flow_id;
5718                 rule->queue_id = queue_id;
5719                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5720                 ret = hclge_fd_config_rule(hdev, rule);
5721
5722                 spin_unlock_bh(&hdev->fd_rule_lock);
5723
5724                 if (ret)
5725                         return ret;
5726
5727                 return rule->location;
5728         }
5729
5730         spin_unlock_bh(&hdev->fd_rule_lock);
5731
5732         if (rule->queue_id == queue_id)
5733                 return rule->location;
5734
5735         tmp_queue_id = rule->queue_id;
5736         rule->queue_id = queue_id;
5737         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5738         if (ret) {
5739                 rule->queue_id = tmp_queue_id;
5740                 return ret;
5741         }
5742
5743         return rule->location;
5744 }
5745
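/* Expire idle aRFS rules in two phases: rules that rps_may_expire_flow()
 * reports as idle are unlinked and their location bits cleared under
 * fd_rule_lock, then the corresponding TCAM entries are removed and the
 * memory is freed after the lock is dropped.
 */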
5746 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5747 {
5748 #ifdef CONFIG_RFS_ACCEL
5749         struct hnae3_handle *handle = &hdev->vport[0].nic;
5750         struct hclge_fd_rule *rule;
5751         struct hlist_node *node;
5752         HLIST_HEAD(del_list);
5753
5754         spin_lock_bh(&hdev->fd_rule_lock);
5755         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5756                 spin_unlock_bh(&hdev->fd_rule_lock);
5757                 return;
5758         }
5759         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5760                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5761                                         rule->flow_id, rule->location)) {
5762                         hlist_del_init(&rule->rule_node);
5763                         hlist_add_head(&rule->rule_node, &del_list);
5764                         hdev->hclge_fd_rule_num--;
5765                         clear_bit(rule->location, hdev->fd_bmap);
5766                 }
5767         }
5768         spin_unlock_bh(&hdev->fd_rule_lock);
5769
5770         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5771                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5772                                      rule->location, NULL, false);
5773                 kfree(rule);
5774         }
5775 #endif
5776 }
5777
5778 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5779 {
5780 #ifdef CONFIG_RFS_ACCEL
5781         struct hclge_vport *vport = hclge_get_vport(handle);
5782         struct hclge_dev *hdev = vport->back;
5783
5784         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5785                 hclge_del_all_fd_entries(handle, true);
5786 #endif
5787 }
5788
5789 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5790 {
5791         struct hclge_vport *vport = hclge_get_vport(handle);
5792         struct hclge_dev *hdev = vport->back;
5793
5794         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5795                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5796 }
5797
5798 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5799 {
5800         struct hclge_vport *vport = hclge_get_vport(handle);
5801         struct hclge_dev *hdev = vport->back;
5802
5803         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5804 }
5805
5806 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5807 {
5808         struct hclge_vport *vport = hclge_get_vport(handle);
5809         struct hclge_dev *hdev = vport->back;
5810
5811         return hdev->rst_stats.hw_reset_done_cnt;
5812 }
5813
5814 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5815 {
5816         struct hclge_vport *vport = hclge_get_vport(handle);
5817         struct hclge_dev *hdev = vport->back;
5818         bool clear;
5819
5820         hdev->fd_en = enable;
5821         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5822         if (!enable)
5823                 hclge_del_all_fd_entries(handle, clear);
5824         else
5825                 hclge_restore_fd_entries(handle);
5826 }
5827
5828 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5829 {
5830         struct hclge_desc desc;
5831         struct hclge_config_mac_mode_cmd *req =
5832                 (struct hclge_config_mac_mode_cmd *)desc.data;
5833         u32 loop_en = 0;
5834         int ret;
5835
5836         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5837         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5838         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5839         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5840         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5841         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5842         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5843         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5844         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5845         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5846         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5847         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5848         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5849         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5850         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5851         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5852
5853         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5854         if (ret)
5855                 dev_err(&hdev->pdev->dev,
5856                         "mac enable fail, ret =%d.\n", ret);
5857 }
5858
5859 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5860 {
5861         struct hclge_config_mac_mode_cmd *req;
5862         struct hclge_desc desc;
5863         u32 loop_en;
5864         int ret;
5865
5866         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5867         /* 1 Read out the MAC mode config at first */
5868         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5869         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5870         if (ret) {
5871                 dev_err(&hdev->pdev->dev,
5872                         "mac loopback get fail, ret =%d.\n", ret);
5873                 return ret;
5874         }
5875
5876         /* 2 Then setup the loopback flag */
5877         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5878         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5879         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5880         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5881
5882         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5883
5884         /* 3 Config mac work mode with loopback flag
5885          * and its original configure parameters
5886          */
5887         hclge_cmd_reuse_desc(&desc, false);
5888         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5889         if (ret)
5890                 dev_err(&hdev->pdev->dev,
5891                         "mac loopback set fail, ret =%d.\n", ret);
5892         return ret;
5893 }
5894
5895 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5896                                      enum hnae3_loop loop_mode)
5897 {
5898 #define HCLGE_SERDES_RETRY_MS   10
5899 #define HCLGE_SERDES_RETRY_NUM  100
5900
5901 #define HCLGE_MAC_LINK_STATUS_MS   10
5902 #define HCLGE_MAC_LINK_STATUS_NUM  100
5903 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5904 #define HCLGE_MAC_LINK_STATUS_UP   1
5905
5906         struct hclge_serdes_lb_cmd *req;
5907         struct hclge_desc desc;
5908         int mac_link_ret = 0;
5909         int ret, i = 0;
5910         u8 loop_mode_b;
5911
5912         req = (struct hclge_serdes_lb_cmd *)desc.data;
5913         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5914
5915         switch (loop_mode) {
5916         case HNAE3_LOOP_SERIAL_SERDES:
5917                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5918                 break;
5919         case HNAE3_LOOP_PARALLEL_SERDES:
5920                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5921                 break;
5922         default:
5923                 dev_err(&hdev->pdev->dev,
5924                         "unsupported serdes loopback mode %d\n", loop_mode);
5925                 return -ENOTSUPP;
5926         }
5927
5928         if (en) {
5929                 req->enable = loop_mode_b;
5930                 req->mask = loop_mode_b;
5931                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5932         } else {
5933                 req->mask = loop_mode_b;
5934                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5935         }
5936
5937         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5938         if (ret) {
5939                 dev_err(&hdev->pdev->dev,
5940                         "serdes loopback set fail, ret = %d\n", ret);
5941                 return ret;
5942         }
5943
5944         do {
5945                 msleep(HCLGE_SERDES_RETRY_MS);
5946                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5947                                            true);
5948                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5949                 if (ret) {
5950                         dev_err(&hdev->pdev->dev,
5951                                 "serdes loopback get fail, ret = %d\n", ret);
5952                         return ret;
5953                 }
5954         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5955                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5956
5957         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5958                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5959                 return -EBUSY;
5960         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5961                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5962                 return -EIO;
5963         }
5964
5965         hclge_cfg_mac_mode(hdev, en);
5966
5967         i = 0;
5968         do {
5969                 /* serdes internal loopback, independent of the network cable. */
5970                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5971                 ret = hclge_get_mac_link_status(hdev);
5972                 if (ret == mac_link_ret)
5973                         return 0;
5974         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5975
5976         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5977
5978         return -EBUSY;
5979 }
5980
5981 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5982                             int stream_id, bool enable)
5983 {
5984         struct hclge_desc desc;
5985         struct hclge_cfg_com_tqp_queue_cmd *req =
5986                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5987         int ret;
5988
5989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5990         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5991         req->stream_id = cpu_to_le16(stream_id);
5992         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5993
5994         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5995         if (ret)
5996                 dev_err(&hdev->pdev->dev,
5997                         "Tqp enable fail, status =%d.\n", ret);
5998         return ret;
5999 }
6000
6001 static int hclge_set_loopback(struct hnae3_handle *handle,
6002                               enum hnae3_loop loop_mode, bool en)
6003 {
6004         struct hclge_vport *vport = hclge_get_vport(handle);
6005         struct hnae3_knic_private_info *kinfo;
6006         struct hclge_dev *hdev = vport->back;
6007         int i, ret;
6008
6009         switch (loop_mode) {
6010         case HNAE3_LOOP_APP:
6011                 ret = hclge_set_app_loopback(hdev, en);
6012                 break;
6013         case HNAE3_LOOP_SERIAL_SERDES:
6014         case HNAE3_LOOP_PARALLEL_SERDES:
6015                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6016                 break;
6017         default:
6018                 ret = -ENOTSUPP;
6019                 dev_err(&hdev->pdev->dev,
6020                         "loop_mode %d is not supported\n", loop_mode);
6021                 break;
6022         }
6023
6024         if (ret)
6025                 return ret;
6026
6027         kinfo = &vport->nic.kinfo;
6028         for (i = 0; i < kinfo->num_tqps; i++) {
6029                 ret = hclge_tqp_enable(hdev, i, 0, en);
6030                 if (ret)
6031                         return ret;
6032         }
6033
6034         return 0;
6035 }
6036
6037 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6038 {
6039         struct hclge_vport *vport = hclge_get_vport(handle);
6040         struct hnae3_knic_private_info *kinfo;
6041         struct hnae3_queue *queue;
6042         struct hclge_tqp *tqp;
6043         int i;
6044
6045         kinfo = &vport->nic.kinfo;
6046         for (i = 0; i < kinfo->num_tqps; i++) {
6047                 queue = handle->kinfo.tqp[i];
6048                 tqp = container_of(queue, struct hclge_tqp, q);
6049                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6050         }
6051 }
6052
6053 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6054 {
6055         struct hclge_vport *vport = hclge_get_vport(handle);
6056         struct hclge_dev *hdev = vport->back;
6057
6058         if (enable) {
6059                 mod_timer(&hdev->service_timer, jiffies + HZ);
6060         } else {
6061                 del_timer_sync(&hdev->service_timer);
6062                 cancel_work_sync(&hdev->service_task);
6063                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6064         }
6065 }
6066
6067 static int hclge_ae_start(struct hnae3_handle *handle)
6068 {
6069         struct hclge_vport *vport = hclge_get_vport(handle);
6070         struct hclge_dev *hdev = vport->back;
6071
6072         /* mac enable */
6073         hclge_cfg_mac_mode(hdev, true);
6074         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6075         hdev->hw.mac.link = 0;
6076
6077         /* reset tqp stats */
6078         hclge_reset_tqp_stats(handle);
6079
6080         hclge_mac_start_phy(hdev);
6081
6082         return 0;
6083 }
6084
6085 static void hclge_ae_stop(struct hnae3_handle *handle)
6086 {
6087         struct hclge_vport *vport = hclge_get_vport(handle);
6088         struct hclge_dev *hdev = vport->back;
6089         int i;
6090
6091         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6092
6093         hclge_clear_arfs_rules(handle);
6094
6095         /* If it is not a PF reset, the firmware will disable the MAC,
6096          * so it only needs to stop the phy here.
6097          */
6098         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6099             hdev->reset_type != HNAE3_FUNC_RESET) {
6100                 hclge_mac_stop_phy(hdev);
6101                 return;
6102         }
6103
6104         for (i = 0; i < handle->kinfo.num_tqps; i++)
6105                 hclge_reset_tqp(handle, i);
6106
6107         /* Mac disable */
6108         hclge_cfg_mac_mode(hdev, false);
6109
6110         hclge_mac_stop_phy(hdev);
6111
6112         /* reset tqp stats */
6113         hclge_reset_tqp_stats(handle);
6114         hclge_update_link_status(hdev);
6115 }
6116
6117 int hclge_vport_start(struct hclge_vport *vport)
6118 {
6119         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6120         vport->last_active_jiffies = jiffies;
6121         return 0;
6122 }
6123
6124 void hclge_vport_stop(struct hclge_vport *vport)
6125 {
6126         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6127 }
6128
6129 static int hclge_client_start(struct hnae3_handle *handle)
6130 {
6131         struct hclge_vport *vport = hclge_get_vport(handle);
6132
6133         return hclge_vport_start(vport);
6134 }
6135
6136 static void hclge_client_stop(struct hnae3_handle *handle)
6137 {
6138         struct hclge_vport *vport = hclge_get_vport(handle);
6139
6140         hclge_vport_stop(vport);
6141 }
6142
6143 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6144                                          u16 cmdq_resp, u8  resp_code,
6145                                          enum hclge_mac_vlan_tbl_opcode op)
6146 {
6147         struct hclge_dev *hdev = vport->back;
6148         int return_status = -EIO;
6149
6150         if (cmdq_resp) {
6151                 dev_err(&hdev->pdev->dev,
6152                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6153                         cmdq_resp);
6154                 return -EIO;
6155         }
6156
6157         if (op == HCLGE_MAC_VLAN_ADD) {
6158                 if (!resp_code || resp_code == 1) {
6159                         return_status = 0;
6160                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6161                         return_status = -ENOSPC;
6162                         dev_err(&hdev->pdev->dev,
6163                                 "add mac addr failed for uc_overflow.\n");
6164                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6165                         return_status = -ENOSPC;
6166                         dev_err(&hdev->pdev->dev,
6167                                 "add mac addr failed for mc_overflow.\n");
6168                 } else {
6169                         dev_err(&hdev->pdev->dev,
6170                                 "add mac addr failed for undefined, code=%d.\n",
6171                                 resp_code);
6172                 }
6173         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6174                 if (!resp_code) {
6175                         return_status = 0;
6176                 } else if (resp_code == 1) {
6177                         return_status = -ENOENT;
6178                         dev_dbg(&hdev->pdev->dev,
6179                                 "remove mac addr failed for miss.\n");
6180                 } else {
6181                         dev_err(&hdev->pdev->dev,
6182                                 "remove mac addr failed for undefined, code=%d.\n",
6183                                 resp_code);
6184                 }
6185         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6186                 if (!resp_code) {
6187                         return_status = 0;
6188                 } else if (resp_code == 1) {
6189                         return_status = -ENOENT;
6190                         dev_dbg(&hdev->pdev->dev,
6191                                 "lookup mac addr failed for miss.\n");
6192                 } else {
6193                         dev_err(&hdev->pdev->dev,
6194                                 "lookup mac addr failed for undefined, code=%d.\n",
6195                                 resp_code);
6196                 }
6197         } else {
6198                 return_status = -EINVAL;
6199                 dev_err(&hdev->pdev->dev,
6200                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6201                         op);
6202         }
6203
6204         return return_status;
6205 }
6206
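/* The per-entry function-id bitmap spans two command descriptors: ids
 * 0..191 map to the six 32-bit words of desc[1].data, ids 192..255 to
 * desc[2].data. 'clr' clears the bit for vfid, otherwise it is set.
 */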
6207 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6208 {
6209 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6210
6211         int word_num;
6212         int bit_num;
6213
6214         if (vfid > 255 || vfid < 0)
6215                 return -EIO;
6216
6217         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6218                 word_num = vfid / 32;
6219                 bit_num  = vfid % 32;
6220                 if (clr)
6221                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6222                 else
6223                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6224         } else {
6225                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6226                 bit_num  = vfid % 32;
6227                 if (clr)
6228                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6229                 else
6230                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6231         }
6232
6233         return 0;
6234 }
6235
6236 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6237 {
6238 #define HCLGE_DESC_NUMBER 3
6239 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6240         int i, j;
6241
6242         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6243                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6244                         if (desc[i].data[j])
6245                                 return false;
6246
6247         return true;
6248 }
6249
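/* Pack the 6-byte MAC into the table entry: bytes 0-3 go into mac_addr_hi32
 * (byte 0 in the least significant position) and bytes 4-5 into
 * mac_addr_lo16, both stored little-endian for the command descriptor.
 */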
6250 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6251                                    const u8 *addr, bool is_mc)
6252 {
6253         const unsigned char *mac_addr = addr;
6254         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6255                        (mac_addr[0]) | (mac_addr[1] << 8);
6256         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6257
6258         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6259         if (is_mc) {
6260                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6261                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6262         }
6263
6264         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6265         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6266 }
6267
6268 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6269                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6270 {
6271         struct hclge_dev *hdev = vport->back;
6272         struct hclge_desc desc;
6273         u8 resp_code;
6274         u16 retval;
6275         int ret;
6276
6277         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6278
6279         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6280
6281         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6282         if (ret) {
6283                 dev_err(&hdev->pdev->dev,
6284                         "del mac addr failed for cmd_send, ret =%d.\n",
6285                         ret);
6286                 return ret;
6287         }
6288         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6289         retval = le16_to_cpu(desc.retval);
6290
6291         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6292                                              HCLGE_MAC_VLAN_REMOVE);
6293 }
6294
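/* Multicast entries span three command descriptors chained with
 * HCLGE_CMD_FLAG_NEXT, so the lookup (and the add path below) uses a
 * 3-descriptor array when is_mc is set; unicast entries fit in a single
 * descriptor.
 */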
6295 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6296                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6297                                      struct hclge_desc *desc,
6298                                      bool is_mc)
6299 {
6300         struct hclge_dev *hdev = vport->back;
6301         u8 resp_code;
6302         u16 retval;
6303         int ret;
6304
6305         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6306         if (is_mc) {
6307                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6308                 memcpy(desc[0].data,
6309                        req,
6310                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6311                 hclge_cmd_setup_basic_desc(&desc[1],
6312                                            HCLGE_OPC_MAC_VLAN_ADD,
6313                                            true);
6314                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6315                 hclge_cmd_setup_basic_desc(&desc[2],
6316                                            HCLGE_OPC_MAC_VLAN_ADD,
6317                                            true);
6318                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6319         } else {
6320                 memcpy(desc[0].data,
6321                        req,
6322                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6323                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6324         }
6325         if (ret) {
6326                 dev_err(&hdev->pdev->dev,
6327                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6328                         ret);
6329                 return ret;
6330         }
6331         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6332         retval = le16_to_cpu(desc[0].retval);
6333
6334         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6335                                              HCLGE_MAC_VLAN_LKUP);
6336 }
6337
6338 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6339                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6340                                   struct hclge_desc *mc_desc)
6341 {
6342         struct hclge_dev *hdev = vport->back;
6343         int cfg_status;
6344         u8 resp_code;
6345         u16 retval;
6346         int ret;
6347
6348         if (!mc_desc) {
6349                 struct hclge_desc desc;
6350
6351                 hclge_cmd_setup_basic_desc(&desc,
6352                                            HCLGE_OPC_MAC_VLAN_ADD,
6353                                            false);
6354                 memcpy(desc.data, req,
6355                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6356                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6357                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6358                 retval = le16_to_cpu(desc.retval);
6359
6360                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6361                                                            resp_code,
6362                                                            HCLGE_MAC_VLAN_ADD);
6363         } else {
6364                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6365                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6366                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6367                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6368                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6369                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6370                 memcpy(mc_desc[0].data, req,
6371                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6372                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6373                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6374                 retval = le16_to_cpu(mc_desc[0].retval);
6375
6376                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6377                                                            resp_code,
6378                                                            HCLGE_MAC_VLAN_ADD);
6379         }
6380
6381         if (ret) {
6382                 dev_err(&hdev->pdev->dev,
6383                         "add mac addr failed for cmd_send, ret =%d.\n",
6384                         ret);
6385                 return ret;
6386         }
6387
6388         return cfg_status;
6389 }
6390
6391 static int hclge_init_umv_space(struct hclge_dev *hdev)
6392 {
6393         u16 allocated_size = 0;
6394         int ret;
6395
6396         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6397                                   true);
6398         if (ret)
6399                 return ret;
6400
6401         if (allocated_size < hdev->wanted_umv_size)
6402                 dev_warn(&hdev->pdev->dev,
6403                          "Alloc umv space failed, want %d, get %d\n",
6404                          hdev->wanted_umv_size, allocated_size);
6405
6406         mutex_init(&hdev->umv_mutex);
6407         hdev->max_umv_size = allocated_size;
6408         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6409          * preserve some unicast mac vlan table entries shared by pf
6410          * and its vfs.
6411          */
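        /* e.g. (illustrative numbers only): with max_umv_size = 256 and
         * num_req_vfs = 6, priv_umv_size = 256 / 8 = 32 and
         * share_umv_size = 32 + 256 % 8 = 32.
         */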
6412         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6413         hdev->share_umv_size = hdev->priv_umv_size +
6414                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6415
6416         return 0;
6417 }
6418
6419 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6420 {
6421         int ret;
6422
6423         if (hdev->max_umv_size > 0) {
6424                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6425                                           false);
6426                 if (ret)
6427                         return ret;
6428                 hdev->max_umv_size = 0;
6429         }
6430         mutex_destroy(&hdev->umv_mutex);
6431
6432         return 0;
6433 }
6434
6435 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6436                                u16 *allocated_size, bool is_alloc)
6437 {
6438         struct hclge_umv_spc_alc_cmd *req;
6439         struct hclge_desc desc;
6440         int ret;
6441
6442         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6443         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6444         if (!is_alloc)
6445                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6446
6447         req->space_size = cpu_to_le32(space_size);
6448
6449         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6450         if (ret) {
6451                 dev_err(&hdev->pdev->dev,
6452                         "%s umv space failed for cmd_send, ret =%d\n",
6453                         is_alloc ? "allocate" : "free", ret);
6454                 return ret;
6455         }
6456
6457         if (is_alloc && allocated_size)
6458                 *allocated_size = le32_to_cpu(desc.data[1]);
6459
6460         return 0;
6461 }
6462
6463 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6464 {
6465         struct hclge_vport *vport;
6466         int i;
6467
6468         for (i = 0; i < hdev->num_alloc_vport; i++) {
6469                 vport = &hdev->vport[i];
6470                 vport->used_umv_num = 0;
6471         }
6472
6473         mutex_lock(&hdev->umv_mutex);
6474         hdev->share_umv_size = hdev->priv_umv_size +
6475                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6476         mutex_unlock(&hdev->umv_mutex);
6477 }
6478
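/* UMV accounting: each vport consumes its private quota (priv_umv_size)
 * first and then the shared pool (share_umv_size); hclge_update_umv_space()
 * below mirrors this in reverse when an address is freed.
 */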
6479 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6480 {
6481         struct hclge_dev *hdev = vport->back;
6482         bool is_full;
6483
6484         mutex_lock(&hdev->umv_mutex);
6485         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6486                    hdev->share_umv_size == 0);
6487         mutex_unlock(&hdev->umv_mutex);
6488
6489         return is_full;
6490 }
6491
6492 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6493 {
6494         struct hclge_dev *hdev = vport->back;
6495
6496         mutex_lock(&hdev->umv_mutex);
6497         if (is_free) {
6498                 if (vport->used_umv_num > hdev->priv_umv_size)
6499                         hdev->share_umv_size++;
6500
6501                 if (vport->used_umv_num > 0)
6502                         vport->used_umv_num--;
6503         } else {
6504                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6505                     hdev->share_umv_size > 0)
6506                         hdev->share_umv_size--;
6507                 vport->used_umv_num++;
6508         }
6509         mutex_unlock(&hdev->umv_mutex);
6510 }
6511
6512 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6513                              const unsigned char *addr)
6514 {
6515         struct hclge_vport *vport = hclge_get_vport(handle);
6516
6517         return hclge_add_uc_addr_common(vport, addr);
6518 }
6519
6520 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6521                              const unsigned char *addr)
6522 {
6523         struct hclge_dev *hdev = vport->back;
6524         struct hclge_mac_vlan_tbl_entry_cmd req;
6525         struct hclge_desc desc;
6526         u16 egress_port = 0;
6527         int ret;
6528
6529         /* mac addr check */
6530         if (is_zero_ether_addr(addr) ||
6531             is_broadcast_ether_addr(addr) ||
6532             is_multicast_ether_addr(addr)) {
6533                 dev_err(&hdev->pdev->dev,
6534                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6535                          addr, is_zero_ether_addr(addr),
6536                          is_broadcast_ether_addr(addr),
6537                          is_multicast_ether_addr(addr));
6538                 return -EINVAL;
6539         }
6540
6541         memset(&req, 0, sizeof(req));
6542
6543         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6544                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6545
6546         req.egress_port = cpu_to_le16(egress_port);
6547
6548         hclge_prepare_mac_addr(&req, addr, false);
6549
6550         /* Lookup the mac address in the mac_vlan table, and add
6551          * it if the entry does not exist. Duplicate unicast entries
6552          * are not allowed in the mac vlan table.
6553          */
6554         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6555         if (ret == -ENOENT) {
6556                 if (!hclge_is_umv_space_full(vport)) {
6557                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6558                         if (!ret)
6559                                 hclge_update_umv_space(vport, false);
6560                         return ret;
6561                 }
6562
6563                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6564                         hdev->priv_umv_size);
6565
6566                 return -ENOSPC;
6567         }
6568
6569         /* check if we just hit the duplicate */
6570         if (!ret) {
6571                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6572                          vport->vport_id, addr);
6573                 return 0;
6574         }
6575
6576         dev_err(&hdev->pdev->dev,
6577                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6578                 addr);
6579
6580         return ret;
6581 }
6582
6583 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6584                             const unsigned char *addr)
6585 {
6586         struct hclge_vport *vport = hclge_get_vport(handle);
6587
6588         return hclge_rm_uc_addr_common(vport, addr);
6589 }
6590
6591 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6592                             const unsigned char *addr)
6593 {
6594         struct hclge_dev *hdev = vport->back;
6595         struct hclge_mac_vlan_tbl_entry_cmd req;
6596         int ret;
6597
6598         /* mac addr check */
6599         if (is_zero_ether_addr(addr) ||
6600             is_broadcast_ether_addr(addr) ||
6601             is_multicast_ether_addr(addr)) {
6602                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6603                         addr);
6604                 return -EINVAL;
6605         }
6606
6607         memset(&req, 0, sizeof(req));
6608         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6609         hclge_prepare_mac_addr(&req, addr, false);
6610         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6611         if (!ret)
6612                 hclge_update_umv_space(vport, true);
6613
6614         return ret;
6615 }
6616
6617 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6618                              const unsigned char *addr)
6619 {
6620         struct hclge_vport *vport = hclge_get_vport(handle);
6621
6622         return hclge_add_mc_addr_common(vport, addr);
6623 }
6624
6625 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6626                              const unsigned char *addr)
6627 {
6628         struct hclge_dev *hdev = vport->back;
6629         struct hclge_mac_vlan_tbl_entry_cmd req;
6630         struct hclge_desc desc[3];
6631         int status;
6632
6633         /* mac addr check */
6634         if (!is_multicast_ether_addr(addr)) {
6635                 dev_err(&hdev->pdev->dev,
6636                         "Add mc mac err! invalid mac:%pM.\n",
6637                          addr);
6638                 return -EINVAL;
6639         }
6640         memset(&req, 0, sizeof(req));
6641         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6642         hclge_prepare_mac_addr(&req, addr, true);
6643         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6644         if (status) {
6645                 /* This mac addr does not exist, add a new entry for it */
6646                 memset(desc[0].data, 0, sizeof(desc[0].data));
6647                 memset(desc[1].data, 0, sizeof(desc[0].data));
6648                 memset(desc[2].data, 0, sizeof(desc[0].data));
6649         }
6650         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6651         if (status)
6652                 return status;
6653         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6654
6655         if (status == -ENOSPC)
6656                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6657
6658         return status;
6659 }
6660
6661 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6662                             const unsigned char *addr)
6663 {
6664         struct hclge_vport *vport = hclge_get_vport(handle);
6665
6666         return hclge_rm_mc_addr_common(vport, addr);
6667 }
6668
6669 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6670                             const unsigned char *addr)
6671 {
6672         struct hclge_dev *hdev = vport->back;
6673         struct hclge_mac_vlan_tbl_entry_cmd req;
6674         enum hclge_cmd_status status;
6675         struct hclge_desc desc[3];
6676
6677         /* mac addr check */
6678         if (!is_multicast_ether_addr(addr)) {
6679                 dev_dbg(&hdev->pdev->dev,
6680                         "Remove mc mac err! invalid mac:%pM.\n",
6681                          addr);
6682                 return -EINVAL;
6683         }
6684
6685         memset(&req, 0, sizeof(req));
6686         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6687         hclge_prepare_mac_addr(&req, addr, true);
6688         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6689         if (!status) {
6690                 /* This mac addr exists, remove this handle's VFID for it */
6691                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6692                 if (status)
6693                         return status;
6694
6695                 if (hclge_is_all_function_id_zero(desc))
6696                         /* All the vfids are zero, so delete this entry */
6697                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6698                 else
6699                         /* Not all the vfids are zero, just update the vfid */
6700                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6701
6702         } else {
6703                 /* This mac address may be in the mta table, but it cannot be
6704                  * deleted here because an mta entry represents an address
6705                  * range rather than a specific address. The delete action for
6706                  * all entries will take effect in update_mta_status called by
6707                  * hns3_nic_set_rx_mode.
6708                  */
6709                 status = 0;
6710         }
6711
6712         return status;
6713 }
6714
6715 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6716                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6717 {
6718         struct hclge_vport_mac_addr_cfg *mac_cfg;
6719         struct list_head *list;
6720
6721         if (!vport->vport_id)
6722                 return;
6723
6724         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6725         if (!mac_cfg)
6726                 return;
6727
6728         mac_cfg->hd_tbl_status = true;
6729         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6730
6731         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6732                &vport->uc_mac_list : &vport->mc_mac_list;
6733
6734         list_add_tail(&mac_cfg->node, list);
6735 }
6736
6737 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6738                               bool is_write_tbl,
6739                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6740 {
6741         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6742         struct list_head *list;
6743         bool uc_flag, mc_flag;
6744
6745         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6746                &vport->uc_mac_list : &vport->mc_mac_list;
6747
6748         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6749         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6750
6751         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6752                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6753                         if (uc_flag && mac_cfg->hd_tbl_status)
6754                                 hclge_rm_uc_addr_common(vport, mac_addr);
6755
6756                         if (mc_flag && mac_cfg->hd_tbl_status)
6757                                 hclge_rm_mc_addr_common(vport, mac_addr);
6758
6759                         list_del(&mac_cfg->node);
6760                         kfree(mac_cfg);
6761                         break;
6762                 }
6763         }
6764 }
6765
6766 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6767                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6768 {
6769         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6770         struct list_head *list;
6771
6772         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6773                &vport->uc_mac_list : &vport->mc_mac_list;
6774
6775         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6776                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6777                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6778
6779                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6780                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6781
6782                 mac_cfg->hd_tbl_status = false;
6783                 if (is_del_list) {
6784                         list_del(&mac_cfg->node);
6785                         kfree(mac_cfg);
6786                 }
6787         }
6788 }
6789
6790 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6791 {
6792         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6793         struct hclge_vport *vport;
6794         int i;
6795
6796         mutex_lock(&hdev->vport_cfg_mutex);
6797         for (i = 0; i < hdev->num_alloc_vport; i++) {
6798                 vport = &hdev->vport[i];
6799                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6800                         list_del(&mac->node);
6801                         kfree(mac);
6802                 }
6803
6804                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6805                         list_del(&mac->node);
6806                         kfree(mac);
6807                 }
6808         }
6809         mutex_unlock(&hdev->vport_cfg_mutex);
6810 }
6811
6812 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6813                                               u16 cmdq_resp, u8 resp_code)
6814 {
6815 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6816 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6817 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6818 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6819
6820         int return_status;
6821
6822         if (cmdq_resp) {
6823                 dev_err(&hdev->pdev->dev,
6824                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6825                         cmdq_resp);
6826                 return -EIO;
6827         }
6828
6829         switch (resp_code) {
6830         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6831         case HCLGE_ETHERTYPE_ALREADY_ADD:
6832                 return_status = 0;
6833                 break;
6834         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6835                 dev_err(&hdev->pdev->dev,
6836                         "add mac ethertype failed for manager table overflow.\n");
6837                 return_status = -EIO;
6838                 break;
6839         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6840                 dev_err(&hdev->pdev->dev,
6841                         "add mac ethertype failed for key conflict.\n");
6842                 return_status = -EIO;
6843                 break;
6844         default:
6845                 dev_err(&hdev->pdev->dev,
6846                         "add mac ethertype failed for undefined, code=%d.\n",
6847                         resp_code);
6848                 return_status = -EIO;
6849         }
6850
6851         return return_status;
6852 }
6853
6854 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6855                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6856 {
6857         struct hclge_desc desc;
6858         u8 resp_code;
6859         u16 retval;
6860         int ret;
6861
6862         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6863         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6864
6865         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6866         if (ret) {
6867                 dev_err(&hdev->pdev->dev,
6868                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6869                         ret);
6870                 return ret;
6871         }
6872
6873         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6874         retval = le16_to_cpu(desc.retval);
6875
6876         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6877 }
6878
6879 static int init_mgr_tbl(struct hclge_dev *hdev)
6880 {
6881         int ret;
6882         int i;
6883
6884         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6885                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6886                 if (ret) {
6887                         dev_err(&hdev->pdev->dev,
6888                                 "add mac ethertype failed, ret =%d.\n",
6889                                 ret);
6890                         return ret;
6891                 }
6892         }
6893
6894         return 0;
6895 }
6896
6897 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6898 {
6899         struct hclge_vport *vport = hclge_get_vport(handle);
6900         struct hclge_dev *hdev = vport->back;
6901
6902         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6903 }
6904
6905 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6906                               bool is_first)
6907 {
6908         const unsigned char *new_addr = (const unsigned char *)p;
6909         struct hclge_vport *vport = hclge_get_vport(handle);
6910         struct hclge_dev *hdev = vport->back;
6911         int ret;
6912
6913         /* mac addr check */
6914         if (is_zero_ether_addr(new_addr) ||
6915             is_broadcast_ether_addr(new_addr) ||
6916             is_multicast_ether_addr(new_addr)) {
6917                 dev_err(&hdev->pdev->dev,
6918                         "Change uc mac err! invalid mac:%pM.\n",
6919                          new_addr);
6920                 return -EINVAL;
6921         }
6922
6923         if ((!is_first || is_kdump_kernel()) &&
6924             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6925                 dev_warn(&hdev->pdev->dev,
6926                          "remove old uc mac address fail.\n");
6927
6928         ret = hclge_add_uc_addr(handle, new_addr);
6929         if (ret) {
6930                 dev_err(&hdev->pdev->dev,
6931                         "add uc mac address fail, ret =%d.\n",
6932                         ret);
6933
6934                 if (!is_first &&
6935                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6936                         dev_err(&hdev->pdev->dev,
6937                                 "restore uc mac address fail.\n");
6938
6939                 return -EIO;
6940         }
6941
6942         ret = hclge_pause_addr_cfg(hdev, new_addr);
6943         if (ret) {
6944                 dev_err(&hdev->pdev->dev,
6945                         "configure mac pause address fail, ret =%d.\n",
6946                         ret);
6947                 return -EIO;
6948         }
6949
6950         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6951
6952         return 0;
6953 }
6954
6955 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6956                           int cmd)
6957 {
6958         struct hclge_vport *vport = hclge_get_vport(handle);
6959         struct hclge_dev *hdev = vport->back;
6960
6961         if (!hdev->hw.mac.phydev)
6962                 return -EOPNOTSUPP;
6963
6964         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6965 }
6966
6967 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6968                                       u8 fe_type, bool filter_en, u8 vf_id)
6969 {
6970         struct hclge_vlan_filter_ctrl_cmd *req;
6971         struct hclge_desc desc;
6972         int ret;
6973
6974         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6975
6976         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6977         req->vlan_type = vlan_type;
6978         req->vlan_fe = filter_en ? fe_type : 0;
6979         req->vf_id = vf_id;
6980
6981         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6982         if (ret)
6983                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6984                         ret);
6985
6986         return ret;
6987 }
6988
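/* VLAN filter enable bit layout: revision 0x20 hardware only provides the
 * single V1 egress enable bit, while later revisions expose separate NIC and
 * RoCE ingress/egress enable bits, which are combined below.
 */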
6989 #define HCLGE_FILTER_TYPE_VF            0
6990 #define HCLGE_FILTER_TYPE_PORT          1
6991 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6992 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6993 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6994 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6995 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6996 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6997                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6998 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6999                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7000
7001 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7002 {
7003         struct hclge_vport *vport = hclge_get_vport(handle);
7004         struct hclge_dev *hdev = vport->back;
7005
7006         if (hdev->pdev->revision >= 0x21) {
7007                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7008                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7009                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7010                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7011         } else {
7012                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7013                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7014                                            0);
7015         }
7016         if (enable)
7017                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7018         else
7019                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7020 }
7021
7022 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7023                                     bool is_kill, u16 vlan, u8 qos,
7024                                     __be16 proto)
7025 {
7026 #define HCLGE_MAX_VF_BYTES  16
7027         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7028         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7029         struct hclge_desc desc[2];
7030         u8 vf_byte_val;
7031         u8 vf_byte_off;
7032         int ret;
7033
7034         /* if the vf vlan table is full, firmware will disable the vf vlan
7035          * filter, so adding a new vlan id is neither possible nor necessary
7036          */
7037         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7038                 return 0;
7039
7040         hclge_cmd_setup_basic_desc(&desc[0],
7041                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7042         hclge_cmd_setup_basic_desc(&desc[1],
7043                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7044
7045         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7046
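        /* Each VF is one bit in a bitmap split across the two descriptors,
         * e.g. a (hypothetical) vfid of 10 selects byte 10 / 8 = 1 and bit
         * value 1 << (10 % 8) = 0x04.
         */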
7047         vf_byte_off = vfid / 8;
7048         vf_byte_val = 1 << (vfid % 8);
7049
7050         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7051         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7052
7053         req0->vlan_id  = cpu_to_le16(vlan);
7054         req0->vlan_cfg = is_kill;
7055
7056         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7057                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7058         else
7059                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7060
7061         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7062         if (ret) {
7063                 dev_err(&hdev->pdev->dev,
7064                         "Send vf vlan command fail, ret =%d.\n",
7065                         ret);
7066                 return ret;
7067         }
7068
7069         if (!is_kill) {
7070 #define HCLGE_VF_VLAN_NO_ENTRY  2
7071                 if (!req0->resp_code || req0->resp_code == 1)
7072                         return 0;
7073
7074                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7075                         set_bit(vfid, hdev->vf_vlan_full);
7076                         dev_warn(&hdev->pdev->dev,
7077                                  "vf vlan table is full, vf vlan filter is disabled\n");
7078                         return 0;
7079                 }
7080
7081                 dev_err(&hdev->pdev->dev,
7082                         "Add vf vlan filter fail, ret =%d.\n",
7083                         req0->resp_code);
7084         } else {
7085 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7086                 if (!req0->resp_code)
7087                         return 0;
7088
7089                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7090                         dev_warn(&hdev->pdev->dev,
7091                                  "vlan %d filter is not in vf vlan table\n",
7092                                  vlan);
7093                         return 0;
7094                 }
7095
7096                 dev_err(&hdev->pdev->dev,
7097                         "Kill vf vlan filter fail, ret =%d.\n",
7098                         req0->resp_code);
7099         }
7100
7101         return -EIO;
7102 }
7103
7104 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7105                                       u16 vlan_id, bool is_kill)
7106 {
7107         struct hclge_vlan_filter_pf_cfg_cmd *req;
7108         struct hclge_desc desc;
7109         u8 vlan_offset_byte_val;
7110         u8 vlan_offset_byte;
7111         u8 vlan_offset_160;
7112         int ret;
7113
7114         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7115
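        /* The PF vlan table is addressed in blocks of 160 vlan ids:
         * vlan_offset selects the block and one bit in the bitmap below
         * selects the id within it, e.g. vlan_id 200 maps to block 1,
         * byte 5, bit 0.
         */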
7116         vlan_offset_160 = vlan_id / 160;
7117         vlan_offset_byte = (vlan_id % 160) / 8;
7118         vlan_offset_byte_val = 1 << (vlan_id % 8);
7119
7120         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7121         req->vlan_offset = vlan_offset_160;
7122         req->vlan_cfg = is_kill;
7123         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7124
7125         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7126         if (ret)
7127                 dev_err(&hdev->pdev->dev,
7128                         "port vlan command, send fail, ret =%d.\n", ret);
7129         return ret;
7130 }
7131
7132 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7133                                     u16 vport_id, u16 vlan_id, u8 qos,
7134                                     bool is_kill)
7135 {
7136         u16 vport_idx, vport_num = 0;
7137         int ret;
7138
7139         if (is_kill && !vlan_id)
7140                 return 0;
7141
7142         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7143                                        0, proto);
7144         if (ret) {
7145                 dev_err(&hdev->pdev->dev,
7146                         "Set %d vport vlan filter config fail, ret =%d.\n",
7147                         vport_id, ret);
7148                 return ret;
7149         }
7150
7151         /* vlan 0 may be added twice when 8021q module is enabled */
7152         if (!is_kill && !vlan_id &&
7153             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7154                 return 0;
7155
7156         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7157                 dev_err(&hdev->pdev->dev,
7158                         "Add port vlan failed, vport %d is already in vlan %d\n",
7159                         vport_id, vlan_id);
7160                 return -EINVAL;
7161         }
7162
7163         if (is_kill &&
7164             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7165                 dev_err(&hdev->pdev->dev,
7166                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7167                         vport_id, vlan_id);
7168                 return -EINVAL;
7169         }
7170
7171         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7172                 vport_num++;
7173
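        /* Only touch the shared port-level filter entry when the last vport
         * leaves this vlan (is_kill && vport_num == 0) or the first vport
         * joins it (vport_num == 1).
         */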
7174         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7175                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7176                                                  is_kill);
7177
7178         return ret;
7179 }
7180
7181 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7182 {
7183         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7184         struct hclge_vport_vtag_tx_cfg_cmd *req;
7185         struct hclge_dev *hdev = vport->back;
7186         struct hclge_desc desc;
7187         int status;
7188
7189         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7190
7191         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7192         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7193         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7194         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7195                       vcfg->accept_tag1 ? 1 : 0);
7196         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7197                       vcfg->accept_untag1 ? 1 : 0);
7198         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7199                       vcfg->accept_tag2 ? 1 : 0);
7200         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7201                       vcfg->accept_untag2 ? 1 : 0);
7202         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7203                       vcfg->insert_tag1_en ? 1 : 0);
7204         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7205                       vcfg->insert_tag2_en ? 1 : 0);
7206         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7207
7208         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7209         req->vf_bitmap[req->vf_offset] =
7210                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7211
7212         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7213         if (status)
7214                 dev_err(&hdev->pdev->dev,
7215                         "Send port txvlan cfg command fail, ret =%d\n",
7216                         status);
7217
7218         return status;
7219 }
7220
7221 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7222 {
7223         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7224         struct hclge_vport_vtag_rx_cfg_cmd *req;
7225         struct hclge_dev *hdev = vport->back;
7226         struct hclge_desc desc;
7227         int status;
7228
7229         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7230
7231         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7232         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7233                       vcfg->strip_tag1_en ? 1 : 0);
7234         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7235                       vcfg->strip_tag2_en ? 1 : 0);
7236         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7237                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7238         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7239                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7240
7241         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7242         req->vf_bitmap[req->vf_offset] =
7243                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7244
7245         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7246         if (status)
7247                 dev_err(&hdev->pdev->dev,
7248                         "Send port rxvlan cfg command fail, ret =%d\n",
7249                         status);
7250
7251         return status;
7252 }
7253
7254 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7255                                   u16 port_base_vlan_state,
7256                                   u16 vlan_tag)
7257 {
7258         int ret;
7259
7260         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7261                 vport->txvlan_cfg.accept_tag1 = true;
7262                 vport->txvlan_cfg.insert_tag1_en = false;
7263                 vport->txvlan_cfg.default_tag1 = 0;
7264         } else {
7265                 vport->txvlan_cfg.accept_tag1 = false;
7266                 vport->txvlan_cfg.insert_tag1_en = true;
7267                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7268         }
7269
7270         vport->txvlan_cfg.accept_untag1 = true;
7271
7272         /* accept_tag2 and accept_untag2 are not supported on
7273          * pdev revision(0x20); newer revisions support them,
7274          * but these two fields cannot be configured by the user.
7275          */
7276         vport->txvlan_cfg.accept_tag2 = true;
7277         vport->txvlan_cfg.accept_untag2 = true;
7278         vport->txvlan_cfg.insert_tag2_en = false;
7279         vport->txvlan_cfg.default_tag2 = 0;
7280
7281         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7282                 vport->rxvlan_cfg.strip_tag1_en = false;
7283                 vport->rxvlan_cfg.strip_tag2_en =
7284                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7285         } else {
7286                 vport->rxvlan_cfg.strip_tag1_en =
7287                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7288                 vport->rxvlan_cfg.strip_tag2_en = true;
7289         }
7290         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7291         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7292
7293         ret = hclge_set_vlan_tx_offload_cfg(vport);
7294         if (ret)
7295                 return ret;
7296
7297         return hclge_set_vlan_rx_offload_cfg(vport);
7298 }
7299
7300 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7301 {
7302         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7303         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7304         struct hclge_desc desc;
7305         int status;
7306
7307         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7308         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7309         rx_req->ot_fst_vlan_type =
7310                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7311         rx_req->ot_sec_vlan_type =
7312                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7313         rx_req->in_fst_vlan_type =
7314                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7315         rx_req->in_sec_vlan_type =
7316                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7317
7318         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7319         if (status) {
7320                 dev_err(&hdev->pdev->dev,
7321                         "Send rxvlan protocol type command fail, ret =%d\n",
7322                         status);
7323                 return status;
7324         }
7325
7326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7327
7328         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7329         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7330         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7331
7332         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7333         if (status)
7334                 dev_err(&hdev->pdev->dev,
7335                         "Send txvlan protocol type command fail, ret =%d\n",
7336                         status);
7337
7338         return status;
7339 }
7340
7341 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7342 {
7343 #define HCLGE_DEF_VLAN_TYPE             0x8100
7344
7345         struct hnae3_handle *handle = &hdev->vport[0].nic;
7346         struct hclge_vport *vport;
7347         int ret;
7348         int i;
7349
7350         if (hdev->pdev->revision >= 0x21) {
7351                 /* for revision 0x21, vf vlan filter is per function */
7352                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7353                         vport = &hdev->vport[i];
7354                         ret = hclge_set_vlan_filter_ctrl(hdev,
7355                                                          HCLGE_FILTER_TYPE_VF,
7356                                                          HCLGE_FILTER_FE_EGRESS,
7357                                                          true,
7358                                                          vport->vport_id);
7359                         if (ret)
7360                                 return ret;
7361                 }
7362
7363                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7364                                                  HCLGE_FILTER_FE_INGRESS, true,
7365                                                  0);
7366                 if (ret)
7367                         return ret;
7368         } else {
7369                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7370                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7371                                                  true, 0);
7372                 if (ret)
7373                         return ret;
7374         }
7375
7376         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7377
7378         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7379         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7380         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7381         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7382         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7383         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7384
7385         ret = hclge_set_vlan_protocol_type(hdev);
7386         if (ret)
7387                 return ret;
7388
7389         for (i = 0; i < hdev->num_alloc_vport; i++) {
7390                 u16 vlan_tag;
7391
7392                 vport = &hdev->vport[i];
7393                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7394
7395                 ret = hclge_vlan_offload_cfg(vport,
7396                                              vport->port_base_vlan_cfg.state,
7397                                              vlan_tag);
7398                 if (ret)
7399                         return ret;
7400         }
7401
7402         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7403 }
7404
7405 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7406                                        bool writen_to_tbl)
7407 {
7408         struct hclge_vport_vlan_cfg *vlan;
7409
7410         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7411         if (!vlan)
7412                 return;
7413
7414         vlan->hd_tbl_status = writen_to_tbl;
7415         vlan->vlan_id = vlan_id;
7416
7417         list_add_tail(&vlan->node, &vport->vlan_list);
7418 }
7419
7420 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7421 {
7422         struct hclge_vport_vlan_cfg *vlan, *tmp;
7423         struct hclge_dev *hdev = vport->back;
7424         int ret;
7425
7426         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7427                 if (!vlan->hd_tbl_status) {
7428                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7429                                                        vport->vport_id,
7430                                                        vlan->vlan_id, 0, false);
7431                         if (ret) {
7432                                 dev_err(&hdev->pdev->dev,
7433                                         "restore vport vlan list failed, ret=%d\n",
7434                                         ret);
7435                                 return ret;
7436                         }
7437                 }
7438                 vlan->hd_tbl_status = true;
7439         }
7440
7441         return 0;
7442 }
7443
7444 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7445                                       bool is_write_tbl)
7446 {
7447         struct hclge_vport_vlan_cfg *vlan, *tmp;
7448         struct hclge_dev *hdev = vport->back;
7449
7450         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7451                 if (vlan->vlan_id == vlan_id) {
7452                         if (is_write_tbl && vlan->hd_tbl_status)
7453                                 hclge_set_vlan_filter_hw(hdev,
7454                                                          htons(ETH_P_8021Q),
7455                                                          vport->vport_id,
7456                                                          vlan_id, 0,
7457                                                          true);
7458
7459                         list_del(&vlan->node);
7460                         kfree(vlan);
7461                         break;
7462                 }
7463         }
7464 }
7465
7466 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7467 {
7468         struct hclge_vport_vlan_cfg *vlan, *tmp;
7469         struct hclge_dev *hdev = vport->back;
7470
7471         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7472                 if (vlan->hd_tbl_status)
7473                         hclge_set_vlan_filter_hw(hdev,
7474                                                  htons(ETH_P_8021Q),
7475                                                  vport->vport_id,
7476                                                  vlan->vlan_id, 0,
7477                                                  true);
7478
7479                 vlan->hd_tbl_status = false;
7480                 if (is_del_list) {
7481                         list_del(&vlan->node);
7482                         kfree(vlan);
7483                 }
7484         }
7485 }
7486
7487 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7488 {
7489         struct hclge_vport_vlan_cfg *vlan, *tmp;
7490         struct hclge_vport *vport;
7491         int i;
7492
7493         mutex_lock(&hdev->vport_cfg_mutex);
7494         for (i = 0; i < hdev->num_alloc_vport; i++) {
7495                 vport = &hdev->vport[i];
7496                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7497                         list_del(&vlan->node);
7498                         kfree(vlan);
7499                 }
7500         }
7501         mutex_unlock(&hdev->vport_cfg_mutex);
7502 }
7503
7504 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7505 {
7506         struct hclge_vport *vport = hclge_get_vport(handle);
7507         struct hclge_vport_vlan_cfg *vlan, *tmp;
7508         struct hclge_dev *hdev = vport->back;
7509         u16 vlan_proto, qos;
7510         u16 state, vlan_id;
7511         int i;
7512
7513         mutex_lock(&hdev->vport_cfg_mutex);
7514         for (i = 0; i < hdev->num_alloc_vport; i++) {
7515                 vport = &hdev->vport[i];
7516                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7517                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7518                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7519                 state = vport->port_base_vlan_cfg.state;
7520
7521                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7522                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7523                                                  vport->vport_id, vlan_id, qos,
7524                                                  false);
7525                         continue;
7526                 }
7527
7528                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7529                         if (vlan->hd_tbl_status)
7530                                 hclge_set_vlan_filter_hw(hdev,
7531                                                          htons(ETH_P_8021Q),
7532                                                          vport->vport_id,
7533                                                          vlan->vlan_id, 0,
7534                                                          false);
7535                 }
7536         }
7537
7538         mutex_unlock(&hdev->vport_cfg_mutex);
7539 }
7540
7541 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7542 {
7543         struct hclge_vport *vport = hclge_get_vport(handle);
7544
7545         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7546                 vport->rxvlan_cfg.strip_tag1_en = false;
7547                 vport->rxvlan_cfg.strip_tag2_en = enable;
7548         } else {
7549                 vport->rxvlan_cfg.strip_tag1_en = enable;
7550                 vport->rxvlan_cfg.strip_tag2_en = true;
7551         }
7552         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7553         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7554         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7555
7556         return hclge_set_vlan_rx_offload_cfg(vport);
7557 }
7558
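/* When enabling port based VLAN, drop the per-vport vlan entries from
 * hardware (the vport vlan list is kept) and program the single port based
 * entry; when disabling it, remove the old port based entry and write the
 * vport vlan list back into hardware.
 */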
7559 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7560                                             u16 port_base_vlan_state,
7561                                             struct hclge_vlan_info *new_info,
7562                                             struct hclge_vlan_info *old_info)
7563 {
7564         struct hclge_dev *hdev = vport->back;
7565         int ret;
7566
7567         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7568                 hclge_rm_vport_all_vlan_table(vport, false);
7569                 return hclge_set_vlan_filter_hw(hdev,
7570                                                  htons(new_info->vlan_proto),
7571                                                  vport->vport_id,
7572                                                  new_info->vlan_tag,
7573                                                  new_info->qos, false);
7574         }
7575
7576         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7577                                        vport->vport_id, old_info->vlan_tag,
7578                                        old_info->qos, true);
7579         if (ret)
7580                 return ret;
7581
7582         return hclge_add_vport_all_vlan_table(vport);
7583 }
7584
7585 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7586                                     struct hclge_vlan_info *vlan_info)
7587 {
7588         struct hnae3_handle *nic = &vport->nic;
7589         struct hclge_vlan_info *old_vlan_info;
7590         struct hclge_dev *hdev = vport->back;
7591         int ret;
7592
7593         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7594
7595         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7596         if (ret)
7597                 return ret;
7598
7599         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7600                 /* add new VLAN tag */
7601                 ret = hclge_set_vlan_filter_hw(hdev,
7602                                                htons(vlan_info->vlan_proto),
7603                                                vport->vport_id,
7604                                                vlan_info->vlan_tag,
7605                                                vlan_info->qos, false);
7606                 if (ret)
7607                         return ret;
7608
7609                 /* remove old VLAN tag */
7610                 ret = hclge_set_vlan_filter_hw(hdev,
7611                                                htons(old_vlan_info->vlan_proto),
7612                                                vport->vport_id,
7613                                                old_vlan_info->vlan_tag,
7614                                                old_vlan_info->qos, true);
7615                 if (ret)
7616                         return ret;
7617
7618                 goto update;
7619         }
7620
7621         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7622                                                old_vlan_info);
7623         if (ret)
7624                 return ret;
7625
7626         /* update state only when disabling/enabling port based VLAN */
7627         vport->port_base_vlan_cfg.state = state;
7628         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7629                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7630         else
7631                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7632
7633 update:
7634         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7635         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7636         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7637
7638         return 0;
7639 }
7640
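/* Map the requested vlan onto a port based VLAN state change:
 * currently disabled, vlan 0        -> NOCHANGE
 * currently disabled, non-zero vlan -> ENABLE
 * currently enabled,  vlan 0        -> DISABLE
 * currently enabled,  same vlan     -> NOCHANGE
 * currently enabled,  other vlan    -> MODIFY
 */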
7641 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7642                                           enum hnae3_port_base_vlan_state state,
7643                                           u16 vlan)
7644 {
7645         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7646                 if (!vlan)
7647                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7648                 else
7649                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7650         } else {
7651                 if (!vlan)
7652                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7653                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7654                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7655                 else
7656                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7657         }
7658 }
7659
7660 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7661                                     u16 vlan, u8 qos, __be16 proto)
7662 {
7663         struct hclge_vport *vport = hclge_get_vport(handle);
7664         struct hclge_dev *hdev = vport->back;
7665         struct hclge_vlan_info vlan_info;
7666         u16 state;
7667         int ret;
7668
7669         if (hdev->pdev->revision == 0x20)
7670                 return -EOPNOTSUPP;
7671
7672         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7673         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7674                 return -EINVAL;
7675         if (proto != htons(ETH_P_8021Q))
7676                 return -EPROTONOSUPPORT;
7677
7678         vport = &hdev->vport[vfid];
7679         state = hclge_get_port_base_vlan_state(vport,
7680                                                vport->port_base_vlan_cfg.state,
7681                                                vlan);
7682         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7683                 return 0;
7684
7685         vlan_info.vlan_tag = vlan;
7686         vlan_info.qos = qos;
7687         vlan_info.vlan_proto = ntohs(proto);
7688
7689         /* update port based VLAN for PF */
7690         if (!vfid) {
7691                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7692                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7693                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7694
7695                 return ret;
7696         }
7697
7698         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
7699                 return hclge_update_port_base_vlan_cfg(vport, state,
7700                                                        &vlan_info);
7701
7702         return hclge_push_vf_port_base_vlan_info(&hdev->vport[0], (u8)vfid,
7703                                                  state, vlan, qos,
7704                                                  ntohs(proto));
7708 }
7709
7710 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7711                           u16 vlan_id, bool is_kill)
7712 {
7713         struct hclge_vport *vport = hclge_get_vport(handle);
7714         struct hclge_dev *hdev = vport->back;
7715         bool writen_to_tbl = false;
7716         int ret = 0;
7717
7718         /* when port based VLAN is enabled, we use the port based VLAN as
7719          * the VLAN filter entry. In this case, we don't update the VLAN
7720          * filter table when the user adds or removes a VLAN, we just update
7721          * the vport VLAN list. The VLAN ids in the list won't be written
7722          * into the VLAN filter table until port based VLAN is disabled
7723          */
7724         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7725                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7726                                                vlan_id, 0, is_kill);
7727                 writen_to_tbl = true;
7728         }
7729
7730         if (ret)
7731                 return ret;
7732
7733         if (is_kill)
7734                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7735         else
7736                 hclge_add_vport_vlan_table(vport, vlan_id,
7737                                            writen_to_tbl);
7738
7739         return 0;
7740 }
7741
7742 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7743 {
7744         struct hclge_config_max_frm_size_cmd *req;
7745         struct hclge_desc desc;
7746
7747         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7748
7749         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7750         req->max_frm_size = cpu_to_le16(new_mps);
7751         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7752
7753         return hclge_cmd_send(&hdev->hw, &desc, 1);
7754 }
7755
7756 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7757 {
7758         struct hclge_vport *vport = hclge_get_vport(handle);
7759
7760         return hclge_set_vport_mtu(vport, new_mtu);
7761 }
7762
7763 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7764 {
7765         struct hclge_dev *hdev = vport->back;
7766         int i, max_frm_size, ret;
7767
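        /* Worst case frame size: MTU plus Ethernet header, FCS and two VLAN
         * tags, e.g. 1500 + 14 + 4 + 2 * 4 = 1526 bytes for a standard
         * 1500 byte MTU.
         */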
7768         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7769         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7770             max_frm_size > HCLGE_MAC_MAX_FRAME)
7771                 return -EINVAL;
7772
7773         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7774         mutex_lock(&hdev->vport_lock);
7775         /* VF's mps must fit within hdev->mps */
7776         if (vport->vport_id && max_frm_size > hdev->mps) {
7777                 mutex_unlock(&hdev->vport_lock);
7778                 return -EINVAL;
7779         } else if (vport->vport_id) {
7780                 vport->mps = max_frm_size;
7781                 mutex_unlock(&hdev->vport_lock);
7782                 return 0;
7783         }
7784
7785         /* PF's mps must be greater than or equal to VF's mps */
7786         for (i = 1; i < hdev->num_alloc_vport; i++)
7787                 if (max_frm_size < hdev->vport[i].mps) {
7788                         mutex_unlock(&hdev->vport_lock);
7789                         return -EINVAL;
7790                 }
7791
7792         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7793
7794         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7795         if (ret) {
7796                 dev_err(&hdev->pdev->dev,
7797                         "Change mtu fail, ret =%d\n", ret);
7798                 goto out;
7799         }
7800
7801         hdev->mps = max_frm_size;
7802         vport->mps = max_frm_size;
7803
7804         ret = hclge_buffer_alloc(hdev);
7805         if (ret)
7806                 dev_err(&hdev->pdev->dev,
7807                         "Allocate buffer fail, ret =%d\n", ret);
7808
7809 out:
7810         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7811         mutex_unlock(&hdev->vport_lock);
7812         return ret;
7813 }
7814
7815 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7816                                     bool enable)
7817 {
7818         struct hclge_reset_tqp_queue_cmd *req;
7819         struct hclge_desc desc;
7820         int ret;
7821
7822         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7823
7824         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7825         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7826         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7827
7828         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7829         if (ret) {
7830                 dev_err(&hdev->pdev->dev,
7831                         "Send tqp reset cmd error, status =%d\n", ret);
7832                 return ret;
7833         }
7834
7835         return 0;
7836 }
7837
7838 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7839 {
7840         struct hclge_reset_tqp_queue_cmd *req;
7841         struct hclge_desc desc;
7842         int ret;
7843
7844         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7845
7846         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7847         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7848
7849         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7850         if (ret) {
7851                 dev_err(&hdev->pdev->dev,
7852                         "Get reset status error, status =%d\n", ret);
7853                 return ret;
7854         }
7855
7856         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7857 }
7858
7859 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7860 {
7861         struct hnae3_queue *queue;
7862         struct hclge_tqp *tqp;
7863
7864         queue = handle->kinfo.tqp[queue_id];
7865         tqp = container_of(queue, struct hclge_tqp, q);
7866
7867         return tqp->index;
7868 }
7869
7870 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7871 {
7872         struct hclge_vport *vport = hclge_get_vport(handle);
7873         struct hclge_dev *hdev = vport->back;
7874         int reset_try_times = 0;
7875         int reset_status;
7876         u16 queue_gid;
7877         int ret;
7878
7879         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7880
7881         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7882         if (ret) {
7883                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7884                 return ret;
7885         }
7886
7887         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7888         if (ret) {
7889                 dev_err(&hdev->pdev->dev,
7890                         "Send reset tqp cmd fail, ret = %d\n", ret);
7891                 return ret;
7892         }
7893
7894         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7895                 /* Wait for tqp hw reset */
7896                 msleep(20);
7897                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7898                 if (reset_status)
7899                         break;
7900         }
7901
7902         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7903                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7904                 return -ETIME;
7905         }
7906
7907         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7908         if (ret)
7909                 dev_err(&hdev->pdev->dev,
7910                         "Deassert the soft reset fail, ret = %d\n", ret);
7911
7912         return ret;
7913 }
7914
7915 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7916 {
7917         struct hclge_dev *hdev = vport->back;
7918         int reset_try_times = 0;
7919         int reset_status;
7920         u16 queue_gid;
7921         int ret;
7922
7923         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7924
7925         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7926         if (ret) {
7927                 dev_warn(&hdev->pdev->dev,
7928                          "Send reset tqp cmd fail, ret = %d\n", ret);
7929                 return;
7930         }
7931
7932         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7933                 /* Wait for tqp hw reset */
7934                 msleep(20);
7935                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7936                 if (reset_status)
7937                         break;
7938         }
7939
7940         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7941                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7942                 return;
7943         }
7944
7945         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7946         if (ret)
7947                 dev_warn(&hdev->pdev->dev,
7948                          "Deassert the soft reset fail, ret = %d\n", ret);
7949 }
7950
7951 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7952 {
7953         struct hclge_vport *vport = hclge_get_vport(handle);
7954         struct hclge_dev *hdev = vport->back;
7955
7956         return hdev->fw_version;
7957 }
7958
7959 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7960 {
7961         struct phy_device *phydev = hdev->hw.mac.phydev;
7962
7963         if (!phydev)
7964                 return;
7965
7966         phy_set_asym_pause(phydev, rx_en, tx_en);
7967 }
7968
7969 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7970 {
7971         int ret;
7972
7973         if (rx_en && tx_en)
7974                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7975         else if (rx_en && !tx_en)
7976                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7977         else if (!rx_en && tx_en)
7978                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7979         else
7980                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7981
7982         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7983                 return 0;
7984
7985         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7986         if (ret) {
7987                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7988                         ret);
7989                 return ret;
7990         }
7991
7992         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7993
7994         return 0;
7995 }
7996
7997 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7998 {
7999         struct phy_device *phydev = hdev->hw.mac.phydev;
8000         u16 remote_advertising = 0;
8001         u16 local_advertising;
8002         u32 rx_pause, tx_pause;
8003         u8 flowctl;
8004
8005         if (!phydev->link || !phydev->autoneg)
8006                 return 0;
8007
8008         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8009
8010         if (phydev->pause)
8011                 remote_advertising = LPA_PAUSE_CAP;
8012
8013         if (phydev->asym_pause)
8014                 remote_advertising |= LPA_PAUSE_ASYM;
8015
8016         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8017                                            remote_advertising);
8018         tx_pause = flowctl & FLOW_CTRL_TX;
8019         rx_pause = flowctl & FLOW_CTRL_RX;
8020
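        /* Pause frames are only defined for full duplex links, so force both
         * directions off when the PHY resolved to half duplex.
         */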
8021         if (phydev->duplex == HCLGE_MAC_HALF) {
8022                 tx_pause = 0;
8023                 rx_pause = 0;
8024         }
8025
8026         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8027 }
8028
8029 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8030                                  u32 *rx_en, u32 *tx_en)
8031 {
8032         struct hclge_vport *vport = hclge_get_vport(handle);
8033         struct hclge_dev *hdev = vport->back;
8034
8035         *auto_neg = hclge_get_autoneg(handle);
8036
8037         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8038                 *rx_en = 0;
8039                 *tx_en = 0;
8040                 return;
8041         }
8042
8043         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8044                 *rx_en = 1;
8045                 *tx_en = 0;
8046         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8047                 *tx_en = 1;
8048                 *rx_en = 0;
8049         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8050                 *rx_en = 1;
8051                 *tx_en = 1;
8052         } else {
8053                 *rx_en = 0;
8054                 *tx_en = 0;
8055         }
8056 }
8057
8058 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8059                                 u32 rx_en, u32 tx_en)
8060 {
8061         struct hclge_vport *vport = hclge_get_vport(handle);
8062         struct hclge_dev *hdev = vport->back;
8063         struct phy_device *phydev = hdev->hw.mac.phydev;
8064         u32 fc_autoneg;
8065
8066         fc_autoneg = hclge_get_autoneg(handle);
8067         if (auto_neg != fc_autoneg) {
8068                 dev_info(&hdev->pdev->dev,
8069                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8070                 return -EOPNOTSUPP;
8071         }
8072
8073         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8074                 dev_info(&hdev->pdev->dev,
8075                          "Priority flow control enabled. Cannot set link flow control.\n");
8076                 return -EOPNOTSUPP;
8077         }
8078
8079         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8080
8081         if (!fc_autoneg)
8082                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8083
8084         if (phydev)
8085                 return phy_start_aneg(phydev);
8086
8087         if (hdev->pdev->revision == 0x20)
8088                 return -EOPNOTSUPP;
8089
8090         return hclge_restart_autoneg(handle);
8091 }
8092
8093 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8094                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8095 {
8096         struct hclge_vport *vport = hclge_get_vport(handle);
8097         struct hclge_dev *hdev = vport->back;
8098
8099         if (speed)
8100                 *speed = hdev->hw.mac.speed;
8101         if (duplex)
8102                 *duplex = hdev->hw.mac.duplex;
8103         if (auto_neg)
8104                 *auto_neg = hdev->hw.mac.autoneg;
8105 }
8106
8107 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8108                                  u8 *module_type)
8109 {
8110         struct hclge_vport *vport = hclge_get_vport(handle);
8111         struct hclge_dev *hdev = vport->back;
8112
8113         if (media_type)
8114                 *media_type = hdev->hw.mac.media_type;
8115
8116         if (module_type)
8117                 *module_type = hdev->hw.mac.module_type;
8118 }
8119
8120 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8121                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8122 {
8123         struct hclge_vport *vport = hclge_get_vport(handle);
8124         struct hclge_dev *hdev = vport->back;
8125         struct phy_device *phydev = hdev->hw.mac.phydev;
8126         int mdix_ctrl, mdix, retval, is_resolved;
8127
8128         if (!phydev) {
8129                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8130                 *tp_mdix = ETH_TP_MDI_INVALID;
8131                 return;
8132         }
8133
8134         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8135
8136         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8137         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8138                                     HCLGE_PHY_MDIX_CTRL_S);
8139
8140         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8141         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8142         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8143
8144         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8145
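        /* mdix_ctrl encoding handled by the switch below: 0 = force MDI,
         * 1 = force MDI-X, 3 = automatic crossover.
         */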
8146         switch (mdix_ctrl) {
8147         case 0x0:
8148                 *tp_mdix_ctrl = ETH_TP_MDI;
8149                 break;
8150         case 0x1:
8151                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8152                 break;
8153         case 0x3:
8154                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8155                 break;
8156         default:
8157                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8158                 break;
8159         }
8160
8161         if (!is_resolved)
8162                 *tp_mdix = ETH_TP_MDI_INVALID;
8163         else if (mdix)
8164                 *tp_mdix = ETH_TP_MDI_X;
8165         else
8166                 *tp_mdix = ETH_TP_MDI;
8167 }
8168
8169 static void hclge_info_show(struct hclge_dev *hdev)
8170 {
8171         struct device *dev = &hdev->pdev->dev;
8172
8173         dev_info(dev, "PF info begin:\n");
8174
8175         dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8176         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8177         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8178         dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8179         dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8180         dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8181         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8182         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8183         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8184         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8185         dev_info(dev, "This is %s PF\n",
8186                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8187         dev_info(dev, "DCB %s\n",
8188                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8189         dev_info(dev, "MQPRIO %s\n",
8190                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8191
8192         dev_info(dev, "PF info end.\n");
8193 }
8194
8195 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8196                                           struct hclge_vport *vport)
8197 {
8198         struct hnae3_client *client = vport->nic.client;
8199         struct hclge_dev *hdev = ae_dev->priv;
8200         int ret;
8201
8202         ret = client->ops->init_instance(&vport->nic);
8203         if (ret)
8204                 return ret;
8205
8206         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8207         hnae3_set_client_init_flag(client, ae_dev, 1);
8208
8209         /* Enable nic hw error interrupts */
8210         ret = hclge_config_nic_hw_error(hdev, true);
8211         if (ret)
8212                 dev_err(&ae_dev->pdev->dev,
8213                         "fail(%d) to enable hw error interrupts\n", ret);
8214
8215         if (netif_msg_drv(&hdev->vport->nic))
8216                 hclge_info_show(hdev);
8217
8218         return ret;
8219 }
8220
8221 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8222                                            struct hclge_vport *vport)
8223 {
8224         struct hnae3_client *client = vport->roce.client;
8225         struct hclge_dev *hdev = ae_dev->priv;
8226         int ret;
8227
8228         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8229             !hdev->nic_client)
8230                 return 0;
8231
8232         client = hdev->roce_client;
8233         ret = hclge_init_roce_base_info(vport);
8234         if (ret)
8235                 return ret;
8236
8237         ret = client->ops->init_instance(&vport->roce);
8238         if (ret)
8239                 return ret;
8240
8241         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8242         hnae3_set_client_init_flag(client, ae_dev, 1);
8243
8244         return 0;
8245 }
8246
8247 static int hclge_init_client_instance(struct hnae3_client *client,
8248                                       struct hnae3_ae_dev *ae_dev)
8249 {
8250         struct hclge_dev *hdev = ae_dev->priv;
8251         struct hclge_vport *vport;
8252         int i, ret;
8253
8254         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8255                 vport = &hdev->vport[i];
8256
8257                 switch (client->type) {
8258                 case HNAE3_CLIENT_KNIC:
8259
8260                         hdev->nic_client = client;
8261                         vport->nic.client = client;
8262                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8263                         if (ret)
8264                                 goto clear_nic;
8265
8266                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8267                         if (ret)
8268                                 goto clear_roce;
8269
8270                         break;
8271                 case HNAE3_CLIENT_ROCE:
8272                         if (hnae3_dev_roce_supported(hdev)) {
8273                                 hdev->roce_client = client;
8274                                 vport->roce.client = client;
8275                         }
8276
8277                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8278                         if (ret)
8279                                 goto clear_roce;
8280
8281                         break;
8282                 default:
8283                         return -EINVAL;
8284                 }
8285         }
8286
8287         /* Enable roce ras interrupts */
8288         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8289         if (ret)
8290                 dev_err(&ae_dev->pdev->dev,
8291                         "fail(%d) to enable roce ras interrupts\n", ret);
8292
8293         return ret;
8294
8295 clear_nic:
8296         hdev->nic_client = NULL;
8297         vport->nic.client = NULL;
8298         return ret;
8299 clear_roce:
8300         hdev->roce_client = NULL;
8301         vport->roce.client = NULL;
8302         return ret;
8303 }
8304
8305 static void hclge_uninit_client_instance(struct hnae3_client *client,
8306                                          struct hnae3_ae_dev *ae_dev)
8307 {
8308         struct hclge_dev *hdev = ae_dev->priv;
8309         struct hclge_vport *vport;
8310         int i;
8311
8312         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8313                 vport = &hdev->vport[i];
8314                 if (hdev->roce_client) {
8315                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8316                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8317                                                                 0);
8318                         hdev->roce_client = NULL;
8319                         vport->roce.client = NULL;
8320                 }
8321                 if (client->type == HNAE3_CLIENT_ROCE)
8322                         return;
8323                 if (hdev->nic_client && client->ops->uninit_instance) {
8324                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8325                         client->ops->uninit_instance(&vport->nic, 0);
8326                         hdev->nic_client = NULL;
8327                         vport->nic.client = NULL;
8328                 }
8329         }
8330 }
8331
8332 static int hclge_pci_init(struct hclge_dev *hdev)
8333 {
8334         struct pci_dev *pdev = hdev->pdev;
8335         struct hclge_hw *hw;
8336         int ret;
8337
8338         ret = pci_enable_device(pdev);
8339         if (ret) {
8340                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8341                 return ret;
8342         }
8343
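             /* prefer 64-bit DMA addressing and fall back to a 32-bit mask
              * when the platform cannot support it
              */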
8344         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8345         if (ret) {
8346                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8347                 if (ret) {
8348                         dev_err(&pdev->dev,
8349                                 "can't set consistent PCI DMA\n");
8350                         goto err_disable_device;
8351                 }
8352                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8353         }
8354
8355         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8356         if (ret) {
8357                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8358                 goto err_disable_device;
8359         }
8360
8361         pci_set_master(pdev);
8362         hw = &hdev->hw;
8363         hw->io_base = pcim_iomap(pdev, 2, 0);
8364         if (!hw->io_base) {
8365                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8366                 ret = -ENOMEM;
8367                 goto err_clr_master;
8368         }
8369
8370         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8371
8372         return 0;
8373 err_clr_master:
8374         pci_clear_master(pdev);
8375         pci_release_regions(pdev);
8376 err_disable_device:
8377         pci_disable_device(pdev);
8378
8379         return ret;
8380 }
8381
8382 static void hclge_pci_uninit(struct hclge_dev *hdev)
8383 {
8384         struct pci_dev *pdev = hdev->pdev;
8385
8386         pcim_iounmap(pdev, hdev->hw.io_base);
8387         pci_free_irq_vectors(pdev);
8388         pci_clear_master(pdev);
8389         pci_release_mem_regions(pdev);
8390         pci_disable_device(pdev);
8391 }
8392
8393 static void hclge_state_init(struct hclge_dev *hdev)
8394 {
8395         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8396         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8397         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8398         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8399         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8400         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8401 }
8402
8403 static void hclge_state_uninit(struct hclge_dev *hdev)
8404 {
8405         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8406         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8407
8408         if (hdev->service_timer.function)
8409                 del_timer_sync(&hdev->service_timer);
8410         if (hdev->reset_timer.function)
8411                 del_timer_sync(&hdev->reset_timer);
8412         if (hdev->service_task.func)
8413                 cancel_work_sync(&hdev->service_task);
8414         if (hdev->rst_service_task.func)
8415                 cancel_work_sync(&hdev->rst_service_task);
8416         if (hdev->mbx_service_task.func)
8417                 cancel_work_sync(&hdev->mbx_service_task);
8418 }
8419
8420 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8421 {
8422 #define HCLGE_FLR_WAIT_MS       100
8423 #define HCLGE_FLR_WAIT_CNT      50
8424         struct hclge_dev *hdev = ae_dev->priv;
8425         int cnt = 0;
8426
8427         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8428         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8429         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8430         hclge_reset_event(hdev->pdev, NULL);
8431
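             /* wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS ms for the
              * reset task to set HNAE3_FLR_DOWN before the FLR continues
              */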
8432         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8433                cnt++ < HCLGE_FLR_WAIT_CNT)
8434                 msleep(HCLGE_FLR_WAIT_MS);
8435
8436         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8437                 dev_err(&hdev->pdev->dev,
8438                         "flr wait down timeout: %d\n", cnt);
8439 }
8440
8441 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8442 {
8443         struct hclge_dev *hdev = ae_dev->priv;
8444
8445         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8446 }
8447
8448 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8449 {
8450         struct pci_dev *pdev = ae_dev->pdev;
8451         struct hclge_dev *hdev;
8452         int ret;
8453
8454         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8455         if (!hdev) {
8456                 ret = -ENOMEM;
8457                 goto out;
8458         }
8459
8460         hdev->pdev = pdev;
8461         hdev->ae_dev = ae_dev;
8462         hdev->reset_type = HNAE3_NONE_RESET;
8463         hdev->reset_level = HNAE3_FUNC_RESET;
8464         ae_dev->priv = hdev;
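             /* default frame size: Ethernet frame plus FCS and two VLAN tags */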
8465         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8466
8467         mutex_init(&hdev->vport_lock);
8468         mutex_init(&hdev->vport_cfg_mutex);
8469         spin_lock_init(&hdev->fd_rule_lock);
8470
8471         ret = hclge_pci_init(hdev);
8472         if (ret) {
8473                 dev_err(&pdev->dev, "PCI init failed\n");
8474                 goto out;
8475         }
8476
8477         /* Firmware command queue initialization */
8478         ret = hclge_cmd_queue_init(hdev);
8479         if (ret) {
8480                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8481                 goto err_pci_uninit;
8482         }
8483
8484         /* Firmware command initialization */
8485         ret = hclge_cmd_init(hdev);
8486         if (ret)
8487                 goto err_cmd_uninit;
8488
8489         ret = hclge_get_cap(hdev);
8490         if (ret) {
8491                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8492                         ret);
8493                 goto err_cmd_uninit;
8494         }
8495
8496         ret = hclge_configure(hdev);
8497         if (ret) {
8498                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8499                 goto err_cmd_uninit;
8500         }
8501
8502         ret = hclge_init_msi(hdev);
8503         if (ret) {
8504                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8505                 goto err_cmd_uninit;
8506         }
8507
8508         ret = hclge_misc_irq_init(hdev);
8509         if (ret) {
8510                 dev_err(&pdev->dev,
8511                         "Misc IRQ(vector0) init error, ret = %d.\n",
8512                         ret);
8513                 goto err_msi_uninit;
8514         }
8515
8516         ret = hclge_alloc_tqps(hdev);
8517         if (ret) {
8518                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8519                 goto err_msi_irq_uninit;
8520         }
8521
8522         ret = hclge_alloc_vport(hdev);
8523         if (ret) {
8524                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8525                 goto err_msi_irq_uninit;
8526         }
8527
8528         ret = hclge_map_tqp(hdev);
8529         if (ret) {
8530                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8531                 goto err_msi_irq_uninit;
8532         }
8533
8534         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8535                 ret = hclge_mac_mdio_config(hdev);
8536                 if (ret) {
8537                         dev_err(&hdev->pdev->dev,
8538                                 "mdio config fail, ret = %d\n", ret);
8539                         goto err_msi_irq_uninit;
8540                 }
8541         }
8542
8543         ret = hclge_init_umv_space(hdev);
8544         if (ret) {
8545                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8546                 goto err_mdiobus_unreg;
8547         }
8548
8549         ret = hclge_mac_init(hdev);
8550         if (ret) {
8551                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8552                 goto err_mdiobus_unreg;
8553         }
8554
8555         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8556         if (ret) {
8557                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
8558                 goto err_mdiobus_unreg;
8559         }
8560
8561         ret = hclge_config_gro(hdev, true);
8562         if (ret)
8563                 goto err_mdiobus_unreg;
8564
8565         ret = hclge_init_vlan_config(hdev);
8566         if (ret) {
8567                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
8568                 goto err_mdiobus_unreg;
8569         }
8570
8571         ret = hclge_tm_schd_init(hdev);
8572         if (ret) {
8573                 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
8574                 goto err_mdiobus_unreg;
8575         }
8576
8577         hclge_rss_init_cfg(hdev);
8578         ret = hclge_rss_init_hw(hdev);
8579         if (ret) {
8580                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
8581                 goto err_mdiobus_unreg;
8582         }
8583
8584         ret = init_mgr_tbl(hdev);
8585         if (ret) {
8586                 dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
8587                 goto err_mdiobus_unreg;
8588         }
8589
8590         ret = hclge_init_fd_config(hdev);
8591         if (ret) {
8592                 dev_err(&pdev->dev,
8593                         "fd table init fail, ret=%d\n", ret);
8594                 goto err_mdiobus_unreg;
8595         }
8596
8597         INIT_KFIFO(hdev->mac_tnl_log);
8598
8599         hclge_dcb_ops_set(hdev);
8600
8601         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8602         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8603         INIT_WORK(&hdev->service_task, hclge_service_task);
8604         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8605         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8606
8607         hclge_clear_all_event_cause(hdev);
8608
8609         /* Enable MISC vector(vector0) */
8610         hclge_enable_vector(&hdev->misc_vector, true);
8611
8612         hclge_state_init(hdev);
8613         hdev->last_reset_time = jiffies;
8614
8615         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8616         return 0;
8617
8618 err_mdiobus_unreg:
8619         if (hdev->hw.mac.phydev)
8620                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8621 err_msi_irq_uninit:
8622         hclge_misc_irq_uninit(hdev);
8623 err_msi_uninit:
8624         pci_free_irq_vectors(pdev);
8625 err_cmd_uninit:
8626         hclge_cmd_uninit(hdev);
8627 err_pci_uninit:
8628         pcim_iounmap(pdev, hdev->hw.io_base);
8629         pci_clear_master(pdev);
8630         pci_release_regions(pdev);
8631         pci_disable_device(pdev);
8632 out:
8633         return ret;
8634 }
8635
8636 static void hclge_stats_clear(struct hclge_dev *hdev)
8637 {
8638         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8639 }
8640
8641 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8642 {
8643         struct hclge_vport *vport = hdev->vport;
8644         int i;
8645
8646         for (i = 0; i < hdev->num_alloc_vport; i++) {
8647                 hclge_vport_stop(vport);
8648                 vport++;
8649         }
8650 }
8651
8652 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8653 {
8654         struct hclge_dev *hdev = ae_dev->priv;
8655         struct pci_dev *pdev = ae_dev->pdev;
8656         int ret;
8657
8658         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8659
8660         hclge_stats_clear(hdev);
8661         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8662         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8663
8664         ret = hclge_cmd_init(hdev);
8665         if (ret) {
8666                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8667                 return ret;
8668         }
8669
8670         ret = hclge_map_tqp(hdev);
8671         if (ret) {
8672                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8673                 return ret;
8674         }
8675
8676         hclge_reset_umv_space(hdev);
8677
8678         ret = hclge_mac_init(hdev);
8679         if (ret) {
8680                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8681                 return ret;
8682         }
8683
8684         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8685         if (ret) {
8686                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
8687                 return ret;
8688         }
8689
8690         ret = hclge_config_gro(hdev, true);
8691         if (ret)
8692                 return ret;
8693
8694         ret = hclge_init_vlan_config(hdev);
8695         if (ret) {
8696                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
8697                 return ret;
8698         }
8699
8700         ret = hclge_tm_init_hw(hdev, true);
8701         if (ret) {
8702                 dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
8703                 return ret;
8704         }
8705
8706         ret = hclge_rss_init_hw(hdev);
8707         if (ret) {
8708                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
8709                 return ret;
8710         }
8711
8712         ret = hclge_init_fd_config(hdev);
8713         if (ret) {
8714                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8715                 return ret;
8716         }
8717
8718         /* Re-enable the hw error interrupts because
8719          * the interrupts get disabled on global reset.
8720          */
8721         ret = hclge_config_nic_hw_error(hdev, true);
8722         if (ret) {
8723                 dev_err(&pdev->dev,
8724                         "fail(%d) to re-enable NIC hw error interrupts\n",
8725                         ret);
8726                 return ret;
8727         }
8728
8729         if (hdev->roce_client) {
8730                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8731                 if (ret) {
8732                         dev_err(&pdev->dev,
8733                                 "fail(%d) to re-enable roce ras interrupts\n",
8734                                 ret);
8735                         return ret;
8736                 }
8737         }
8738
8739         hclge_reset_vport_state(hdev);
8740
8741         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8742                  HCLGE_DRIVER_NAME);
8743
8744         return 0;
8745 }
8746
8747 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8748 {
8749         struct hclge_dev *hdev = ae_dev->priv;
8750         struct hclge_mac *mac = &hdev->hw.mac;
8751
8752         hclge_state_uninit(hdev);
8753
8754         if (mac->phydev)
8755                 mdiobus_unregister(mac->mdio_bus);
8756
8757         hclge_uninit_umv_space(hdev);
8758
8759         /* Disable MISC vector(vector0) */
8760         hclge_enable_vector(&hdev->misc_vector, false);
8761         synchronize_irq(hdev->misc_vector.vector_irq);
8762
8763         /* Disable all hw interrupts */
8764         hclge_config_mac_tnl_int(hdev, false);
8765         hclge_config_nic_hw_error(hdev, false);
8766         hclge_config_rocee_ras_interrupt(hdev, false);
8767
8768         hclge_cmd_uninit(hdev);
8769         hclge_misc_irq_uninit(hdev);
8770         hclge_pci_uninit(hdev);
8771         mutex_destroy(&hdev->vport_lock);
8772         hclge_uninit_vport_mac_table(hdev);
8773         hclge_uninit_vport_vlan_table(hdev);
8774         mutex_destroy(&hdev->vport_cfg_mutex);
8775         ae_dev->priv = NULL;
8776 }
8777
8778 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8779 {
8780         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8781         struct hclge_vport *vport = hclge_get_vport(handle);
8782         struct hclge_dev *hdev = vport->back;
8783
8784         return min_t(u32, hdev->rss_size_max,
8785                      vport->alloc_tqps / kinfo->num_tc);
8786 }
8787
8788 static void hclge_get_channels(struct hnae3_handle *handle,
8789                                struct ethtool_channels *ch)
8790 {
8791         ch->max_combined = hclge_get_max_channels(handle);
8792         ch->other_count = 1;
8793         ch->max_other = 1;
8794         ch->combined_count = handle->kinfo.rss_size;
8795 }
8796
8797 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8798                                         u16 *alloc_tqps, u16 *max_rss_size)
8799 {
8800         struct hclge_vport *vport = hclge_get_vport(handle);
8801         struct hclge_dev *hdev = vport->back;
8802
8803         *alloc_tqps = vport->alloc_tqps;
8804         *max_rss_size = hdev->rss_size_max;
8805 }
8806
8807 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8808                               bool rxfh_configured)
8809 {
8810         struct hclge_vport *vport = hclge_get_vport(handle);
8811         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8812         struct hclge_dev *hdev = vport->back;
8813         int cur_rss_size = kinfo->rss_size;
8814         int cur_tqps = kinfo->num_tqps;
8815         u16 tc_offset[HCLGE_MAX_TC_NUM];
8816         u16 tc_valid[HCLGE_MAX_TC_NUM];
8817         u16 tc_size[HCLGE_MAX_TC_NUM];
8818         u16 roundup_size;
8819         u32 *rss_indir;
8820         int ret, i;
8821
8822         kinfo->req_rss_size = new_tqps_num;
8823
8824         ret = hclge_tm_vport_map_update(hdev);
8825         if (ret) {
8826                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
8827                 return ret;
8828         }
8829
8830         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8831         roundup_size = ilog2(roundup_size);
8832         /* Set the RSS TC mode according to the new RSS size */
8833         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8834                 tc_valid[i] = 0;
8835
8836                 if (!(hdev->hw_tc_map & BIT(i)))
8837                         continue;
8838
8839                 tc_valid[i] = 1;
8840                 tc_size[i] = roundup_size;
8841                 tc_offset[i] = kinfo->rss_size * i;
8842         }
8843         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8844         if (ret)
8845                 return ret;
8846
8847         /* RSS indirection table has been configured by user */
8848         if (rxfh_configured)
8849                 goto out;
8850
8851         /* Reinitialize the RSS indirection table to match the new RSS size */
8852         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8853         if (!rss_indir)
8854                 return -ENOMEM;
8855
8856         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8857                 rss_indir[i] = i % kinfo->rss_size;
8858
8859         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8860         if (ret)
8861                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8862                         ret);
8863
8864         kfree(rss_indir);
8865
8866 out:
8867         if (!ret)
8868                 dev_info(&hdev->pdev->dev,
8869                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8870                          cur_rss_size, kinfo->rss_size,
8871                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8872
8873         return ret;
8874 }
8875
8876 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8877                               u32 *regs_num_64_bit)
8878 {
8879         struct hclge_desc desc;
8880         u32 total_num;
8881         int ret;
8882
8883         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8884         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8885         if (ret) {
8886                 dev_err(&hdev->pdev->dev,
8887                         "Query register number cmd failed, ret = %d.\n", ret);
8888                 return ret;
8889         }
8890
8891         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8892         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8893
8894         total_num = *regs_num_32_bit + *regs_num_64_bit;
8895         if (!total_num)
8896                 return -EINVAL;
8897
8898         return 0;
8899 }
8900
8901 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8902                                  void *data)
8903 {
8904 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8905 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8906
8907         struct hclge_desc *desc;
8908         u32 *reg_val = data;
8909         __le32 *desc_data;
8910         int nodata_num;
8911         int cmd_num;
8912         int i, k, n;
8913         int ret;
8914
8915         if (regs_num == 0)
8916                 return 0;
8917
8918         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8919         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8920                                HCLGE_32_BIT_REG_RTN_DATANUM);
8921         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8922         if (!desc)
8923                 return -ENOMEM;
8924
8925         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8926         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8927         if (ret) {
8928                 dev_err(&hdev->pdev->dev,
8929                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8930                 kfree(desc);
8931                 return ret;
8932         }
8933
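             /* the first descriptor reserves nodata_num words for the command
              * header, so it carries fewer register values; the following
              * descriptors are filled entirely with register data
              */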
8934         for (i = 0; i < cmd_num; i++) {
8935                 if (i == 0) {
8936                         desc_data = (__le32 *)(&desc[i].data[0]);
8937                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8938                 } else {
8939                         desc_data = (__le32 *)(&desc[i]);
8940                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8941                 }
8942                 for (k = 0; k < n; k++) {
8943                         *reg_val++ = le32_to_cpu(*desc_data++);
8944
8945                         regs_num--;
8946                         if (!regs_num)
8947                                 break;
8948                 }
8949         }
8950
8951         kfree(desc);
8952         return 0;
8953 }
8954
8955 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8956                                  void *data)
8957 {
8958 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8959 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
8960
8961         struct hclge_desc *desc;
8962         u64 *reg_val = data;
8963         __le64 *desc_data;
8964         int nodata_len;
8965         int cmd_num;
8966         int i, k, n;
8967         int ret;
8968
8969         if (regs_num == 0)
8970                 return 0;
8971
8972         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
8973         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
8974                                HCLGE_64_BIT_REG_RTN_DATANUM);
8975         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8976         if (!desc)
8977                 return -ENOMEM;
8978
8979         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8980         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8981         if (ret) {
8982                 dev_err(&hdev->pdev->dev,
8983                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8984                 kfree(desc);
8985                 return ret;
8986         }
8987
8988         for (i = 0; i < cmd_num; i++) {
8989                 if (i == 0) {
8990                         desc_data = (__le64 *)(&desc[i].data[0]);
8991                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
8992                 } else {
8993                         desc_data = (__le64 *)(&desc[i]);
8994                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8995                 }
8996                 for (k = 0; k < n; k++) {
8997                         *reg_val++ = le64_to_cpu(*desc_data++);
8998
8999                         regs_num--;
9000                         if (!regs_num)
9001                                 break;
9002                 }
9003         }
9004
9005         kfree(desc);
9006         return 0;
9007 }
9008
9009 #define MAX_SEPARATE_NUM        4
9010 #define SEPARATOR_VALUE         0xFFFFFFFF
9011 #define REG_NUM_PER_LINE        4
9012 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9013
9014 static int hclge_get_regs_len(struct hnae3_handle *handle)
9015 {
9016         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9017         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9018         struct hclge_vport *vport = hclge_get_vport(handle);
9019         struct hclge_dev *hdev = vport->back;
9020         u32 regs_num_32_bit, regs_num_64_bit;
9021         int ret;
9022
9023         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9024         if (ret) {
9025                 dev_err(&hdev->pdev->dev,
9026                         "Get register number failed, ret = %d.\n", ret);
9027                 return -EOPNOTSUPP;
9028         }
9029
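             /* each register block below is rounded up to whole dump lines;
              * the extra line ("+ 1") covers the separator padding added in
              * hclge_get_regs()
              */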
9030         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9031         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9032         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9033         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9034
9035         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9036                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9037                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9038 }
9039
9040 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9041                            void *data)
9042 {
9043         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9044         struct hclge_vport *vport = hclge_get_vport(handle);
9045         struct hclge_dev *hdev = vport->back;
9046         u32 regs_num_32_bit, regs_num_64_bit;
9047         int i, j, reg_um, separator_num;
9048         u32 *reg = data;
9049         int ret;
9050
9051         *version = hdev->fw_version;
9052
9053         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9054         if (ret) {
9055                 dev_err(&hdev->pdev->dev,
9056                         "Get register number failed, ret = %d.\n", ret);
9057                 return;
9058         }
9059
9060         /* fetch per-PF register values from the PF PCIe register space */
9061         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9062         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9063         for (i = 0; i < reg_um; i++)
9064                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9065         for (i = 0; i < separator_num; i++)
9066                 *reg++ = SEPARATOR_VALUE;
9067
9068         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9069         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9070         for (i = 0; i < reg_um; i++)
9071                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9072         for (i = 0; i < separator_num; i++)
9073                 *reg++ = SEPARATOR_VALUE;
9074
9075         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9076         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9077         for (j = 0; j < kinfo->num_tqps; j++) {
9078                 for (i = 0; i < reg_um; i++)
9079                         *reg++ = hclge_read_dev(&hdev->hw,
9080                                                 ring_reg_addr_list[i] +
9081                                                 0x200 * j);
9082                 for (i = 0; i < separator_num; i++)
9083                         *reg++ = SEPARATOR_VALUE;
9084         }
9085
9086         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9087         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9088         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9089                 for (i = 0; i < reg_um; i++)
9090                         *reg++ = hclge_read_dev(&hdev->hw,
9091                                                 tqp_intr_reg_addr_list[i] +
9092                                                 4 * j);
9093                 for (i = 0; i < separator_num; i++)
9094                         *reg++ = SEPARATOR_VALUE;
9095         }
9096
9097         /* fetch PF common register values from firmware */
9098         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9099         if (ret) {
9100                 dev_err(&hdev->pdev->dev,
9101                         "Get 32 bit register failed, ret = %d.\n", ret);
9102                 return;
9103         }
9104
9105         reg += regs_num_32_bit;
9106         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9107         if (ret)
9108                 dev_err(&hdev->pdev->dev,
9109                         "Get 64 bit register failed, ret = %d.\n", ret);
9110 }
9111
9112 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9113 {
9114         struct hclge_set_led_state_cmd *req;
9115         struct hclge_desc desc;
9116         int ret;
9117
9118         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9119
9120         req = (struct hclge_set_led_state_cmd *)desc.data;
9121         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9122                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9123
9124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9125         if (ret)
9126                 dev_err(&hdev->pdev->dev,
9127                         "Send set led state cmd error, ret = %d\n", ret);
9128
9129         return ret;
9130 }
9131
9132 enum hclge_led_status {
9133         HCLGE_LED_OFF,
9134         HCLGE_LED_ON,
9135         HCLGE_LED_NO_CHANGE = 0xFF,
9136 };
9137
9138 static int hclge_set_led_id(struct hnae3_handle *handle,
9139                             enum ethtool_phys_id_state status)
9140 {
9141         struct hclge_vport *vport = hclge_get_vport(handle);
9142         struct hclge_dev *hdev = vport->back;
9143
9144         switch (status) {
9145         case ETHTOOL_ID_ACTIVE:
9146                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9147         case ETHTOOL_ID_INACTIVE:
9148                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9149         default:
9150                 return -EINVAL;
9151         }
9152 }
9153
9154 static void hclge_get_link_mode(struct hnae3_handle *handle,
9155                                 unsigned long *supported,
9156                                 unsigned long *advertising)
9157 {
9158         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9159         struct hclge_vport *vport = hclge_get_vport(handle);
9160         struct hclge_dev *hdev = vport->back;
9161         unsigned int idx = 0;
9162
9163         for (; idx < size; idx++) {
9164                 supported[idx] = hdev->hw.mac.supported[idx];
9165                 advertising[idx] = hdev->hw.mac.advertising[idx];
9166         }
9167 }
9168
9169 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9170 {
9171         struct hclge_vport *vport = hclge_get_vport(handle);
9172         struct hclge_dev *hdev = vport->back;
9173
9174         return hclge_config_gro(hdev, enable);
9175 }
9176
9177 static const struct hnae3_ae_ops hclge_ops = {
9178         .init_ae_dev = hclge_init_ae_dev,
9179         .uninit_ae_dev = hclge_uninit_ae_dev,
9180         .flr_prepare = hclge_flr_prepare,
9181         .flr_done = hclge_flr_done,
9182         .init_client_instance = hclge_init_client_instance,
9183         .uninit_client_instance = hclge_uninit_client_instance,
9184         .map_ring_to_vector = hclge_map_ring_to_vector,
9185         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9186         .get_vector = hclge_get_vector,
9187         .put_vector = hclge_put_vector,
9188         .set_promisc_mode = hclge_set_promisc_mode,
9189         .set_loopback = hclge_set_loopback,
9190         .start = hclge_ae_start,
9191         .stop = hclge_ae_stop,
9192         .client_start = hclge_client_start,
9193         .client_stop = hclge_client_stop,
9194         .get_status = hclge_get_status,
9195         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9196         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9197         .get_media_type = hclge_get_media_type,
9198         .check_port_speed = hclge_check_port_speed,
9199         .get_fec = hclge_get_fec,
9200         .set_fec = hclge_set_fec,
9201         .get_rss_key_size = hclge_get_rss_key_size,
9202         .get_rss_indir_size = hclge_get_rss_indir_size,
9203         .get_rss = hclge_get_rss,
9204         .set_rss = hclge_set_rss,
9205         .set_rss_tuple = hclge_set_rss_tuple,
9206         .get_rss_tuple = hclge_get_rss_tuple,
9207         .get_tc_size = hclge_get_tc_size,
9208         .get_mac_addr = hclge_get_mac_addr,
9209         .set_mac_addr = hclge_set_mac_addr,
9210         .do_ioctl = hclge_do_ioctl,
9211         .add_uc_addr = hclge_add_uc_addr,
9212         .rm_uc_addr = hclge_rm_uc_addr,
9213         .add_mc_addr = hclge_add_mc_addr,
9214         .rm_mc_addr = hclge_rm_mc_addr,
9215         .set_autoneg = hclge_set_autoneg,
9216         .get_autoneg = hclge_get_autoneg,
9217         .restart_autoneg = hclge_restart_autoneg,
9218         .get_pauseparam = hclge_get_pauseparam,
9219         .set_pauseparam = hclge_set_pauseparam,
9220         .set_mtu = hclge_set_mtu,
9221         .reset_queue = hclge_reset_tqp,
9222         .get_stats = hclge_get_stats,
9223         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9224         .update_stats = hclge_update_stats,
9225         .get_strings = hclge_get_strings,
9226         .get_sset_count = hclge_get_sset_count,
9227         .get_fw_version = hclge_get_fw_version,
9228         .get_mdix_mode = hclge_get_mdix_mode,
9229         .enable_vlan_filter = hclge_enable_vlan_filter,
9230         .set_vlan_filter = hclge_set_vlan_filter,
9231         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9232         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9233         .reset_event = hclge_reset_event,
9234         .set_default_reset_request = hclge_set_def_reset_request,
9235         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9236         .set_channels = hclge_set_channels,
9237         .get_channels = hclge_get_channels,
9238         .get_regs_len = hclge_get_regs_len,
9239         .get_regs = hclge_get_regs,
9240         .set_led_id = hclge_set_led_id,
9241         .get_link_mode = hclge_get_link_mode,
9242         .add_fd_entry = hclge_add_fd_entry,
9243         .del_fd_entry = hclge_del_fd_entry,
9244         .del_all_fd_entries = hclge_del_all_fd_entries,
9245         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9246         .get_fd_rule_info = hclge_get_fd_rule_info,
9247         .get_fd_all_rules = hclge_get_all_rules,
9248         .restore_fd_rules = hclge_restore_fd_entries,
9249         .enable_fd = hclge_enable_fd,
9250         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9251         .dbg_run_cmd = hclge_dbg_run_cmd,
9252         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9253         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9254         .ae_dev_resetting = hclge_ae_dev_resetting,
9255         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9256         .set_gro_en = hclge_gro_en,
9257         .get_global_queue_id = hclge_covert_handle_qid_global,
9258         .set_timer_task = hclge_set_timer_task,
9259         .mac_connect_phy = hclge_mac_connect_phy,
9260         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9261         .restore_vlan_table = hclge_restore_vlan_table,
9262 };
9263
9264 static struct hnae3_ae_algo ae_algo = {
9265         .ops = &hclge_ops,
9266         .pdev_id_table = ae_algo_pci_tbl,
9267 };
9268
9269 static int hclge_init(void)
9270 {
9271         pr_info("%s is initializing\n", HCLGE_NAME);
9272
9273         hnae3_register_ae_algo(&ae_algo);
9274
9275         return 0;
9276 }
9277
9278 static void hclge_exit(void)
9279 {
9280         hnae3_unregister_ae_algo(&ae_algo);
9281 }
9282 module_init(hclge_init);
9283 module_exit(hclge_exit);
9284
9285 MODULE_LICENSE("GPL");
9286 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9287 MODULE_DESCRIPTION("HCLGE Driver");
9288 MODULE_VERSION(HCLGE_MOD_VERSION);