1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73
74 static struct hnae3_ae_algo ae_algo;
75
76 static struct workqueue_struct *hclge_wq;
77
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87         /* required last entry */
88         {0, }
89 };
90
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94                                          HCLGE_CMDQ_TX_ADDR_H_REG,
95                                          HCLGE_CMDQ_TX_DEPTH_REG,
96                                          HCLGE_CMDQ_TX_TAIL_REG,
97                                          HCLGE_CMDQ_TX_HEAD_REG,
98                                          HCLGE_CMDQ_RX_ADDR_L_REG,
99                                          HCLGE_CMDQ_RX_ADDR_H_REG,
100                                          HCLGE_CMDQ_RX_DEPTH_REG,
101                                          HCLGE_CMDQ_RX_TAIL_REG,
102                                          HCLGE_CMDQ_RX_HEAD_REG,
103                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
104                                          HCLGE_CMDQ_INTR_STS_REG,
105                                          HCLGE_CMDQ_INTR_EN_REG,
106                                          HCLGE_CMDQ_INTR_GEN_REG};
107
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109                                            HCLGE_VECTOR0_OTER_EN_REG,
110                                            HCLGE_MISC_RESET_STS_REG,
111                                            HCLGE_MISC_VECTOR_INT_STS,
112                                            HCLGE_GLOBAL_RESET_REG,
113                                            HCLGE_FUN_RST_ING,
114                                            HCLGE_GRO_EN_REG};
115
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117                                          HCLGE_RING_RX_ADDR_H_REG,
118                                          HCLGE_RING_RX_BD_NUM_REG,
119                                          HCLGE_RING_RX_BD_LENGTH_REG,
120                                          HCLGE_RING_RX_MERGE_EN_REG,
121                                          HCLGE_RING_RX_TAIL_REG,
122                                          HCLGE_RING_RX_HEAD_REG,
123                                          HCLGE_RING_RX_FBD_NUM_REG,
124                                          HCLGE_RING_RX_OFFSET_REG,
125                                          HCLGE_RING_RX_FBD_OFFSET_REG,
126                                          HCLGE_RING_RX_STASH_REG,
127                                          HCLGE_RING_RX_BD_ERR_REG,
128                                          HCLGE_RING_TX_ADDR_L_REG,
129                                          HCLGE_RING_TX_ADDR_H_REG,
130                                          HCLGE_RING_TX_BD_NUM_REG,
131                                          HCLGE_RING_TX_PRIORITY_REG,
132                                          HCLGE_RING_TX_TC_REG,
133                                          HCLGE_RING_TX_MERGE_EN_REG,
134                                          HCLGE_RING_TX_TAIL_REG,
135                                          HCLGE_RING_TX_HEAD_REG,
136                                          HCLGE_RING_TX_FBD_NUM_REG,
137                                          HCLGE_RING_TX_OFFSET_REG,
138                                          HCLGE_RING_TX_EBD_NUM_REG,
139                                          HCLGE_RING_TX_EBD_OFFSET_REG,
140                                          HCLGE_RING_TX_BD_ERR_REG,
141                                          HCLGE_RING_EN_REG};
142
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144                                              HCLGE_TQP_INTR_GL0_REG,
145                                              HCLGE_TQP_INTR_GL1_REG,
146                                              HCLGE_TQP_INTR_GL2_REG,
147                                              HCLGE_TQP_INTR_RL_REG};
148
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150         "App    Loopback test",
151         "Serdes serial Loopback test",
152         "Serdes parallel Loopback test",
153         "Phy    Loopback test"
154 };
155
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157         {"mac_tx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159         {"mac_rx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161         {"mac_tx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163         {"mac_rx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165         {"mac_tx_pfc_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167         {"mac_tx_pfc_pri0_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169         {"mac_tx_pfc_pri1_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171         {"mac_tx_pfc_pri2_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173         {"mac_tx_pfc_pri3_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175         {"mac_tx_pfc_pri4_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177         {"mac_tx_pfc_pri5_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179         {"mac_tx_pfc_pri6_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181         {"mac_tx_pfc_pri7_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183         {"mac_rx_pfc_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185         {"mac_rx_pfc_pri0_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187         {"mac_rx_pfc_pri1_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189         {"mac_rx_pfc_pri2_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191         {"mac_rx_pfc_pri3_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193         {"mac_rx_pfc_pri4_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195         {"mac_rx_pfc_pri5_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197         {"mac_rx_pfc_pri6_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199         {"mac_rx_pfc_pri7_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201         {"mac_tx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203         {"mac_tx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205         {"mac_tx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207         {"mac_tx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209         {"mac_tx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211         {"mac_tx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213         {"mac_tx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215         {"mac_tx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217         {"mac_tx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219         {"mac_tx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221         {"mac_tx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223         {"mac_tx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225         {"mac_tx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227         {"mac_tx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229         {"mac_tx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231         {"mac_tx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233         {"mac_tx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235         {"mac_tx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237         {"mac_tx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239         {"mac_tx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241         {"mac_tx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243         {"mac_tx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245         {"mac_tx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247         {"mac_tx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249         {"mac_tx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251         {"mac_rx_total_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253         {"mac_rx_total_oct_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255         {"mac_rx_good_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257         {"mac_rx_bad_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259         {"mac_rx_good_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261         {"mac_rx_bad_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263         {"mac_rx_uni_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265         {"mac_rx_multi_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267         {"mac_rx_broad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269         {"mac_rx_undersize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271         {"mac_rx_oversize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273         {"mac_rx_64_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275         {"mac_rx_65_127_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277         {"mac_rx_128_255_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279         {"mac_rx_256_511_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281         {"mac_rx_512_1023_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283         {"mac_rx_1024_1518_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285         {"mac_rx_1519_2047_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287         {"mac_rx_2048_4095_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289         {"mac_rx_4096_8191_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291         {"mac_rx_8192_9216_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293         {"mac_rx_9217_12287_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295         {"mac_rx_12288_16383_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297         {"mac_rx_1519_max_good_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299         {"mac_rx_1519_max_bad_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301
302         {"mac_tx_fragment_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304         {"mac_tx_undermin_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306         {"mac_tx_jabber_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308         {"mac_tx_err_all_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310         {"mac_tx_from_app_good_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312         {"mac_tx_from_app_bad_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314         {"mac_rx_fragment_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316         {"mac_rx_undermin_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318         {"mac_rx_jabber_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320         {"mac_rx_fcs_err_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322         {"mac_rx_send_app_good_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324         {"mac_rx_send_app_bad_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329         {
330                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
332                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333                 .i_port_bitmap = 0x1,
334         },
335 };
336
337 static const u8 hclge_hash_key[] = {
338         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344
345 static const u32 hclge_dfx_bd_offset_list[] = {
346         HCLGE_DFX_BIOS_BD_OFFSET,
347         HCLGE_DFX_SSU_0_BD_OFFSET,
348         HCLGE_DFX_SSU_1_BD_OFFSET,
349         HCLGE_DFX_IGU_BD_OFFSET,
350         HCLGE_DFX_RPU_0_BD_OFFSET,
351         HCLGE_DFX_RPU_1_BD_OFFSET,
352         HCLGE_DFX_NCSI_BD_OFFSET,
353         HCLGE_DFX_RTC_BD_OFFSET,
354         HCLGE_DFX_PPP_BD_OFFSET,
355         HCLGE_DFX_RCB_BD_OFFSET,
356         HCLGE_DFX_TQP_BD_OFFSET,
357         HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361         HCLGE_OPC_DFX_BIOS_COMMON_REG,
362         HCLGE_OPC_DFX_SSU_REG_0,
363         HCLGE_OPC_DFX_SSU_REG_1,
364         HCLGE_OPC_DFX_IGU_EGU_REG,
365         HCLGE_OPC_DFX_RPU_REG_0,
366         HCLGE_OPC_DFX_RPU_REG_1,
367         HCLGE_OPC_DFX_NCSI_REG,
368         HCLGE_OPC_DFX_RTC_REG,
369         HCLGE_OPC_DFX_PPP_REG,
370         HCLGE_OPC_DFX_RCB_REG,
371         HCLGE_OPC_DFX_TQP_REG,
372         HCLGE_OPC_DFX_SSU_REG_2
373 };
374
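/* each key_info entry is { key id, key length in bits } */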
375 static const struct key_info meta_data_key_info[] = {
376         { PACKET_TYPE_ID, 6},
377         { IP_FRAGEMENT, 1},
378         { ROCE_TYPE, 1},
379         { NEXT_KEY, 5},
380         { VLAN_NUMBER, 2},
381         { SRC_VPORT, 12},
382         { DST_VPORT, 12},
383         { TUNNEL_PACKET, 1},
384 };
385
386 static const struct key_info tuple_key_info[] = {
387         { OUTER_DST_MAC, 48},
388         { OUTER_SRC_MAC, 48},
389         { OUTER_VLAN_TAG_FST, 16},
390         { OUTER_VLAN_TAG_SEC, 16},
391         { OUTER_ETH_TYPE, 16},
392         { OUTER_L2_RSV, 16},
393         { OUTER_IP_TOS, 8},
394         { OUTER_IP_PROTO, 8},
395         { OUTER_SRC_IP, 32},
396         { OUTER_DST_IP, 32},
397         { OUTER_L3_RSV, 16},
398         { OUTER_SRC_PORT, 16},
399         { OUTER_DST_PORT, 16},
400         { OUTER_L4_RSV, 32},
401         { OUTER_TUN_VNI, 24},
402         { OUTER_TUN_FLOW_ID, 8},
403         { INNER_DST_MAC, 48},
404         { INNER_SRC_MAC, 48},
405         { INNER_VLAN_TAG_FST, 16},
406         { INNER_VLAN_TAG_SEC, 16},
407         { INNER_ETH_TYPE, 16},
408         { INNER_L2_RSV, 16},
409         { INNER_IP_TOS, 8},
410         { INNER_IP_PROTO, 8},
411         { INNER_SRC_IP, 32},
412         { INNER_DST_IP, 32},
413         { INNER_L3_RSV, 16},
414         { INNER_SRC_PORT, 16},
415         { INNER_DST_PORT, 16},
416         { INNER_L4_RSV, 32},
417 };
418
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422
423         u64 *data = (u64 *)(&hdev->mac_stats);
424         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425         __le64 *desc_data;
426         int i, k, n;
427         int ret;
428
429         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431         if (ret) {
432                 dev_err(&hdev->pdev->dev,
433                         "Get MAC pkt stats fail, status = %d.\n", ret);
434
435                 return ret;
436         }
437
438         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439                 /* for special opcode 0032, only the first desc has the head */
440                 if (unlikely(i == 0)) {
441                         desc_data = (__le64 *)(&desc[i].data[0]);
442                         n = HCLGE_RD_FIRST_STATS_NUM;
443                 } else {
444                         desc_data = (__le64 *)(&desc[i]);
445                         n = HCLGE_RD_OTHER_STATS_NUM;
446                 }
447
448                 for (k = 0; k < n; k++) {
449                         *data += le64_to_cpu(*desc_data);
450                         data++;
451                         desc_data++;
452                 }
453         }
454
455         return 0;
456 }
457
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460         u64 *data = (u64 *)(&hdev->mac_stats);
461         struct hclge_desc *desc;
462         __le64 *desc_data;
463         u16 i, k, n;
464         int ret;
465
466         /* This may be called inside atomic context,
467          * so GFP_ATOMIC is more suitable here
468          */
469         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470         if (!desc)
471                 return -ENOMEM;
472
473         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475         if (ret) {
476                 kfree(desc);
477                 return ret;
478         }
479
480         for (i = 0; i < desc_num; i++) {
481                 /* for special opcode 0034, only the first desc has the head */
482                 if (i == 0) {
483                         desc_data = (__le64 *)(&desc[i].data[0]);
484                         n = HCLGE_RD_FIRST_STATS_NUM;
485                 } else {
486                         desc_data = (__le64 *)(&desc[i]);
487                         n = HCLGE_RD_OTHER_STATS_NUM;
488                 }
489
490                 for (k = 0; k < n; k++) {
491                         *data += le64_to_cpu(*desc_data);
492                         data++;
493                         desc_data++;
494                 }
495         }
496
497         kfree(desc);
498
499         return 0;
500 }
501
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504         struct hclge_desc desc;
505         __le32 *desc_data;
506         u32 reg_num;
507         int ret;
508
509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511         if (ret)
512                 return ret;
513
514         desc_data = (__le32 *)(&desc.data[0]);
515         reg_num = le32_to_cpu(*desc_data);
516
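        /* one desc accounts for the first 3 registers; each further desc
         * accounts for up to 4 more, so round the remainder up
         */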
517         *desc_num = 1 + ((reg_num - 3) >> 2) +
518                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520         return 0;
521 }
522
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525         u32 desc_num;
526         int ret;
527
528         ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530         /* If the firmware supports the new statistics acquisition method, use it */
531         if (!ret)
532                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533         else if (ret == -EOPNOTSUPP)
534                 ret = hclge_mac_update_stats_defective(hdev);
535         else
536                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538         return ret;
539 }
540
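/* read the per-queue RX and TX packet counters from firmware and
 * accumulate them into each tqp's stats
 */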
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_vport *vport = hclge_get_vport(handle);
545         struct hclge_dev *hdev = vport->back;
546         struct hnae3_queue *queue;
547         struct hclge_desc desc[1];
548         struct hclge_tqp *tqp;
549         int ret, i;
550
551         for (i = 0; i < kinfo->num_tqps; i++) {
552                 queue = handle->kinfo.tqp[i];
553                 tqp = container_of(queue, struct hclge_tqp, q);
554                 /* command : HCLGE_OPC_QUERY_RX_STATS */
555                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556                                            true);
557
558                 desc[0].data[0] = cpu_to_le32(tqp->index);
559                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560                 if (ret) {
561                         dev_err(&hdev->pdev->dev,
562                                 "Query tqp stat fail, status = %d, queue = %d\n",
563                                 ret, i);
564                         return ret;
565                 }
566                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567                         le32_to_cpu(desc[0].data[1]);
568         }
569
570         for (i = 0; i < kinfo->num_tqps; i++) {
571                 queue = handle->kinfo.tqp[i];
572                 tqp = container_of(queue, struct hclge_tqp, q);
573                 /* command : HCLGE_OPC_QUERY_TX_STATS */
574                 hclge_cmd_setup_basic_desc(&desc[0],
575                                            HCLGE_OPC_QUERY_TX_STATS,
576                                            true);
577
578                 desc[0].data[0] = cpu_to_le32(tqp->index);
579                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580                 if (ret) {
581                         dev_err(&hdev->pdev->dev,
582                                 "Query tqp stat fail, status = %d, queue = %d\n",
583                                 ret, i);
584                         return ret;
585                 }
586                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587                         le32_to_cpu(desc[0].data[1]);
588         }
589
590         return 0;
591 }
592
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596         struct hclge_tqp *tqp;
597         u64 *buff = data;
598         int i;
599
600         for (i = 0; i < kinfo->num_tqps; i++) {
601                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603         }
604
605         for (i = 0; i < kinfo->num_tqps; i++) {
606                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608         }
609
610         return buff;
611 }
612
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
617         /* each tqp has both a TX and an RX queue */
618         return kinfo->num_tqps * 2;
619 }
620
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624         u8 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629                         struct hclge_tqp, q);
630                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
631                          tqp->index);
632                 buff = buff + ETH_GSTRING_LEN;
633         }
634
635         for (i = 0; i < kinfo->num_tqps; i++) {
636                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637                         struct hclge_tqp, q);
638                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
639                          tqp->index);
640                 buff = buff + ETH_GSTRING_LEN;
641         }
642
643         return buff;
644 }
645
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647                                  const struct hclge_comm_stats_str strs[],
648                                  int size, u64 *data)
649 {
650         u64 *buf = data;
651         u32 i;
652
653         for (i = 0; i < size; i++)
654                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656         return buf + size;
657 }
658
659 static u8 *hclge_comm_get_strings(u32 stringset,
660                                   const struct hclge_comm_stats_str strs[],
661                                   int size, u8 *data)
662 {
663         char *buff = (char *)data;
664         u32 i;
665
666         if (stringset != ETH_SS_STATS)
667                 return buff;
668
669         for (i = 0; i < size; i++) {
670                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671                 buff = buff + ETH_GSTRING_LEN;
672         }
673
674         return (u8 *)buff;
675 }
676
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679         struct hnae3_handle *handle;
680         int status;
681
682         handle = &hdev->vport[0].nic;
683         if (handle->client) {
684                 status = hclge_tqps_update_stats(handle);
685                 if (status) {
686                         dev_err(&hdev->pdev->dev,
687                                 "Update TQPS stats fail, status = %d.\n",
688                                 status);
689                 }
690         }
691
692         status = hclge_mac_update_stats(hdev);
693         if (status)
694                 dev_err(&hdev->pdev->dev,
695                         "Update MAC stats fail, status = %d.\n", status);
696 }
697
698 static void hclge_update_stats(struct hnae3_handle *handle,
699                                struct net_device_stats *net_stats)
700 {
701         struct hclge_vport *vport = hclge_get_vport(handle);
702         struct hclge_dev *hdev = vport->back;
703         int status;
704
705         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706                 return;
707
708         status = hclge_mac_update_stats(hdev);
709         if (status)
710                 dev_err(&hdev->pdev->dev,
711                         "Update MAC stats fail, status = %d.\n",
712                         status);
713
714         status = hclge_tqps_update_stats(handle);
715         if (status)
716                 dev_err(&hdev->pdev->dev,
717                         "Update TQPS stats fail, status = %d.\n",
718                         status);
719
720         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726                 HNAE3_SUPPORT_PHY_LOOPBACK |\
727                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730         struct hclge_vport *vport = hclge_get_vport(handle);
731         struct hclge_dev *hdev = vport->back;
732         int count = 0;
733
734         /* Loopback test support rules:
735          * mac: supported only in GE mode
736          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
737          * phy: supported only when a phy device exists on the board
738          */
739         if (stringset == ETH_SS_TEST) {
740                 /* clear loopback bit flags at first */
741                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746                         count += 1;
747                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748                 }
749
750                 count += 2;
751                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
755                      hdev->hw.mac.phydev->drv->set_loopback) ||
756                     hnae3_dev_phy_imp_supported(hdev)) {
757                         count += 1;
758                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759                 }
760
761         } else if (stringset == ETH_SS_STATS) {
762                 count = ARRAY_SIZE(g_mac_stats_string) +
763                         hclge_tqps_get_sset_count(handle, stringset);
764         }
765
766         return count;
767 }
768
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770                               u8 *data)
771 {
772         u8 *p = data;
773         int size;
774
775         if (stringset == ETH_SS_STATS) {
776                 size = ARRAY_SIZE(g_mac_stats_string);
777                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778                                            size, p);
779                 p = hclge_tqps_get_strings(handle, p);
780         } else if (stringset == ETH_SS_TEST) {
781                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783                                ETH_GSTRING_LEN);
784                         p += ETH_GSTRING_LEN;
785                 }
786                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788                                ETH_GSTRING_LEN);
789                         p += ETH_GSTRING_LEN;
790                 }
791                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792                         memcpy(p,
793                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794                                ETH_GSTRING_LEN);
795                         p += ETH_GSTRING_LEN;
796                 }
797                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799                                ETH_GSTRING_LEN);
800                         p += ETH_GSTRING_LEN;
801                 }
802         }
803 }
804
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807         struct hclge_vport *vport = hclge_get_vport(handle);
808         struct hclge_dev *hdev = vport->back;
809         u64 *p;
810
811         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812                                  ARRAY_SIZE(g_mac_stats_string), data);
813         p = hclge_tqps_get_stats(handle, p);
814 }
815
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817                                struct hns3_mac_stats *mac_stats)
818 {
819         struct hclge_vport *vport = hclge_get_vport(handle);
820         struct hclge_dev *hdev = vport->back;
821
822         hclge_update_stats(handle, NULL);
823
824         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829                                    struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK       0xF
832
833         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834                 return -EINVAL;
835
836         /* Record whether this pf is the main pf */
837         if (status->pf_state & HCLGE_PF_STATE_MAIN)
838                 hdev->flag |= HCLGE_FLAG_MAIN;
839         else
840                 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843         return 0;
844 }
845
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT     5
849
850         struct hclge_func_status_cmd *req;
851         struct hclge_desc desc;
852         int timeout = 0;
853         int ret;
854
855         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856         req = (struct hclge_func_status_cmd *)desc.data;
857
858         do {
859                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860                 if (ret) {
861                         dev_err(&hdev->pdev->dev,
862                                 "query function status failed %d.\n", ret);
863                         return ret;
864                 }
865
866                 /* Check if pf reset is done */
867                 if (req->pf_state)
868                         break;
869                 usleep_range(1000, 2000);
870         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871
872         return hclge_parse_func_status(hdev, req);
873 }
874
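/* query the tqp, buffer and MSI resources assigned to this PF by firmware */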
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877         struct hclge_pf_res_cmd *req;
878         struct hclge_desc desc;
879         int ret;
880
881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883         if (ret) {
884                 dev_err(&hdev->pdev->dev,
885                         "query pf resource failed %d.\n", ret);
886                 return ret;
887         }
888
889         req = (struct hclge_pf_res_cmd *)desc.data;
890         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
891                          le16_to_cpu(req->ext_tqp_num);
892         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
893
894         if (req->tx_buf_size)
895                 hdev->tx_buf_size =
896                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
899
900         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (req->dv_buf_size)
903                 hdev->dv_buf_size =
904                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
905         else
906                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
907
908         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
909
910         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
911         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
912                 dev_err(&hdev->pdev->dev,
913                         "only %u msi resources available, not enough for pf(min:2).\n",
914                         hdev->num_nic_msi);
915                 return -EINVAL;
916         }
917
918         if (hnae3_dev_roce_supported(hdev)) {
919                 hdev->num_roce_msi =
920                         le16_to_cpu(req->pf_intr_vector_number_roce);
921
922                 /* PF should have NIC vectors and Roce vectors,
923                  * NIC vectors are queued before Roce vectors.
924                  */
925                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
926         } else {
927                 hdev->num_msi = hdev->num_nic_msi;
928         }
929
930         return 0;
931 }
932
933 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
934 {
935         switch (speed_cmd) {
936         case 6:
937                 *speed = HCLGE_MAC_SPEED_10M;
938                 break;
939         case 7:
940                 *speed = HCLGE_MAC_SPEED_100M;
941                 break;
942         case 0:
943                 *speed = HCLGE_MAC_SPEED_1G;
944                 break;
945         case 1:
946                 *speed = HCLGE_MAC_SPEED_10G;
947                 break;
948         case 2:
949                 *speed = HCLGE_MAC_SPEED_25G;
950                 break;
951         case 3:
952                 *speed = HCLGE_MAC_SPEED_40G;
953                 break;
954         case 4:
955                 *speed = HCLGE_MAC_SPEED_50G;
956                 break;
957         case 5:
958                 *speed = HCLGE_MAC_SPEED_100G;
959                 break;
960         case 8:
961                 *speed = HCLGE_MAC_SPEED_200G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         case HCLGE_MAC_SPEED_200G:
1003                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1004                 break;
1005         default:
1006                 return -EINVAL;
1007         }
1008
1009         if (speed_bit & speed_ability)
1010                 return 0;
1011
1012         return -EINVAL;
1013 }
1014
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1016 {
1017         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028                                  mac->supported);
1029         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031                                  mac->supported);
1032         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1033                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1034                                  mac->supported);
1035 }
1036
1037 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1038 {
1039         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1055                 linkmode_set_bit(
1056                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1057                         mac->supported);
1058 }
1059
1060 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1061 {
1062         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1063                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1064                                  mac->supported);
1065         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1066                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1067                                  mac->supported);
1068         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1069                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1070                                  mac->supported);
1071         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1079                                  mac->supported);
1080 }
1081
1082 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1083 {
1084         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1085                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1086                                  mac->supported);
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1108 {
1109         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1110         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1111
1112         switch (mac->speed) {
1113         case HCLGE_MAC_SPEED_10G:
1114         case HCLGE_MAC_SPEED_40G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1116                                  mac->supported);
1117                 mac->fec_ability =
1118                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1119                 break;
1120         case HCLGE_MAC_SPEED_25G:
1121         case HCLGE_MAC_SPEED_50G:
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1123                                  mac->supported);
1124                 mac->fec_ability =
1125                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1126                         BIT(HNAE3_FEC_AUTO);
1127                 break;
1128         case HCLGE_MAC_SPEED_100G:
1129         case HCLGE_MAC_SPEED_200G:
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1131                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1132                 break;
1133         default:
1134                 mac->fec_ability = 0;
1135                 break;
1136         }
1137 }
1138
1139 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1140                                         u16 speed_ability)
1141 {
1142         struct hclge_mac *mac = &hdev->hw.mac;
1143
1144         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1145                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1146                                  mac->supported);
1147
1148         hclge_convert_setting_sr(mac, speed_ability);
1149         hclge_convert_setting_lr(mac, speed_ability);
1150         hclge_convert_setting_cr(mac, speed_ability);
1151         if (hnae3_dev_fec_supported(hdev))
1152                 hclge_convert_setting_fec(mac);
1153
1154         if (hnae3_dev_pause_supported(hdev))
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1156
1157         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1158         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1159 }
1160
1161 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1162                                             u16 speed_ability)
1163 {
1164         struct hclge_mac *mac = &hdev->hw.mac;
1165
1166         hclge_convert_setting_kr(mac, speed_ability);
1167         if (hnae3_dev_fec_supported(hdev))
1168                 hclge_convert_setting_fec(mac);
1169
1170         if (hnae3_dev_pause_supported(hdev))
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1172
1173         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1174         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1175 }
1176
1177 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1178                                          u16 speed_ability)
1179 {
1180         unsigned long *supported = hdev->hw.mac.supported;
1181
1182         /* default to supporting all speeds for GE ports */
1183         if (!speed_ability)
1184                 speed_ability = HCLGE_SUPPORT_GE;
1185
1186         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1187                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1188                                  supported);
1189
1190         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1191                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1192                                  supported);
1193                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1194                                  supported);
1195         }
1196
1197         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1198                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1199                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1200         }
1201
1202         if (hnae3_dev_pause_supported(hdev)) {
1203                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1204                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1205         }
1206
1207         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 }
1210
1211 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1212 {
1213         u8 media_type = hdev->hw.mac.media_type;
1214
1215         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1216                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1217         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1218                 hclge_parse_copper_link_mode(hdev, speed_ability);
1219         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1220                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1221 }
1222
1223 static u32 hclge_get_max_speed(u16 speed_ability)
1224 {
1225         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1226                 return HCLGE_MAC_SPEED_200G;
1227
1228         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1229                 return HCLGE_MAC_SPEED_100G;
1230
1231         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1232                 return HCLGE_MAC_SPEED_50G;
1233
1234         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1235                 return HCLGE_MAC_SPEED_40G;
1236
1237         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1238                 return HCLGE_MAC_SPEED_25G;
1239
1240         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1241                 return HCLGE_MAC_SPEED_10G;
1242
1243         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1244                 return HCLGE_MAC_SPEED_1G;
1245
1246         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1247                 return HCLGE_MAC_SPEED_100M;
1248
1249         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1250                 return HCLGE_MAC_SPEED_10M;
1251
1252         return HCLGE_MAC_SPEED_1G;
1253 }
1254
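/* unpack the static configuration parameters returned in the query descriptors */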
1255 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1256 {
1257 #define SPEED_ABILITY_EXT_SHIFT                 8
1258
1259         struct hclge_cfg_param_cmd *req;
1260         u64 mac_addr_tmp_high;
1261         u16 speed_ability_ext;
1262         u64 mac_addr_tmp;
1263         unsigned int i;
1264
1265         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1266
1267         /* get the configuration */
1268         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1269                                               HCLGE_CFG_VMDQ_M,
1270                                               HCLGE_CFG_VMDQ_S);
1271         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1272                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1273         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274                                             HCLGE_CFG_TQP_DESC_N_M,
1275                                             HCLGE_CFG_TQP_DESC_N_S);
1276
1277         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1278                                         HCLGE_CFG_PHY_ADDR_M,
1279                                         HCLGE_CFG_PHY_ADDR_S);
1280         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1281                                           HCLGE_CFG_MEDIA_TP_M,
1282                                           HCLGE_CFG_MEDIA_TP_S);
1283         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1284                                           HCLGE_CFG_RX_BUF_LEN_M,
1285                                           HCLGE_CFG_RX_BUF_LEN_S);
1286         /* get mac_address */
1287         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1288         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289                                             HCLGE_CFG_MAC_ADDR_H_M,
1290                                             HCLGE_CFG_MAC_ADDR_H_S);
1291
1292         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1293
1294         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1295                                              HCLGE_CFG_DEFAULT_SPEED_M,
1296                                              HCLGE_CFG_DEFAULT_SPEED_S);
1297         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1298                                                HCLGE_CFG_RSS_SIZE_M,
1299                                                HCLGE_CFG_RSS_SIZE_S);
1300
1301         for (i = 0; i < ETH_ALEN; i++)
1302                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1303
1304         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1305         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1306
1307         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308                                              HCLGE_CFG_SPEED_ABILITY_M,
1309                                              HCLGE_CFG_SPEED_ABILITY_S);
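        /* the extended ability bits are appended above the base bits, so
         * speed_ability ends up holding both fields as one bitmap
         */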
1310         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1311                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1312                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1313         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1314
1315         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1316                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1317                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1318         if (!cfg->umv_space)
1319                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1320
1321         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1322                                                HCLGE_CFG_PF_RSS_SIZE_M,
1323                                                HCLGE_CFG_PF_RSS_SIZE_S);
1324
1325         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max RSS size as a power
1326          * of 2 (an exponent) rather than the value itself, which is
1327          * more flexible for future changes and expansions.
1328          * A PF field of 0 means the exponent is not used; in that case
1329          * the PF falls back to the same max RSS size as the VF
1330          * (the HCLGE_CFG_RSS_SIZE_M field).
1331          */
1332         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1333                                1U << cfg->pf_rss_size_max :
1334                                cfg->vf_rss_size_max;
1335 }
1336
1337 /* hclge_get_cfg: query the static parameters from flash
1338  * @hdev: pointer to struct hclge_dev
1339  * @hcfg: the config structure to be filled
1340  */
1341 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1342 {
1343         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1344         struct hclge_cfg_param_cmd *req;
1345         unsigned int i;
1346         int ret;
1347
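        /* each descriptor reads HCLGE_CFG_RD_LEN_BYTES of the config image;
         * the byte offset and read length are packed into req->offset
         */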
1348         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1349                 u32 offset = 0;
1350
1351                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1352                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1353                                            true);
1354                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1355                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1356                 /* Length must be in units of 4 bytes when sent to hardware */
1357                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1358                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1359                 req->offset = cpu_to_le32(offset);
1360         }
1361
1362         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1363         if (ret) {
1364                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1365                 return ret;
1366         }
1367
1368         hclge_parse_cfg(hcfg, desc);
1369
1370         return 0;
1371 }
1372
1373 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1374 {
1375 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1376
1377         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1378
1379         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1380         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1381         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1382         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1383         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1384         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1385         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1386 }
1387
1388 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1389                                   struct hclge_desc *desc)
1390 {
1391         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1392         struct hclge_dev_specs_0_cmd *req0;
1393         struct hclge_dev_specs_1_cmd *req1;
1394
1395         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1396         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1397
1398         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1399         ae_dev->dev_specs.rss_ind_tbl_size =
1400                 le16_to_cpu(req0->rss_ind_tbl_size);
1401         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1402         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1403         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1404         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1405         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1406         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1407 }
1408
1409 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1410 {
1411         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1412
1413         if (!dev_specs->max_non_tso_bd_num)
1414                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1415         if (!dev_specs->rss_ind_tbl_size)
1416                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417         if (!dev_specs->rss_key_size)
1418                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1419         if (!dev_specs->max_tm_rate)
1420                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1421         if (!dev_specs->max_qset_num)
1422                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1423         if (!dev_specs->max_int_gl)
1424                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1425         if (!dev_specs->max_frm_size)
1426                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1427 }
1428
1429 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1430 {
1431         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1432         int ret;
1433         int i;
1434
1435         /* set default specifications as devices lower than version V3 do not
1436          * support querying specifications from firmware.
1437          */
1438         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1439                 hclge_set_default_dev_specs(hdev);
1440                 return 0;
1441         }
1442
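        /* all but the last descriptor carry the NEXT flag so the BDs are
         * chained into a single query
         */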
1443         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1444                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1445                                            true);
1446                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1447         }
1448         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1449
1450         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1451         if (ret)
1452                 return ret;
1453
1454         hclge_parse_dev_specs(hdev, desc);
1455         hclge_check_dev_specs(hdev);
1456
1457         return 0;
1458 }
1459
1460 static int hclge_get_cap(struct hclge_dev *hdev)
1461 {
1462         int ret;
1463
1464         ret = hclge_query_function_status(hdev);
1465         if (ret) {
1466                 dev_err(&hdev->pdev->dev,
1467                         "query function status error %d.\n", ret);
1468                 return ret;
1469         }
1470
1471         /* get pf resource */
1472         return hclge_query_pf_resource(hdev);
1473 }
1474
1475 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1476 {
1477 #define HCLGE_MIN_TX_DESC       64
1478 #define HCLGE_MIN_RX_DESC       64
1479
1480         if (!is_kdump_kernel())
1481                 return;
1482
1483         dev_info(&hdev->pdev->dev,
1484                  "Running kdump kernel. Using minimal resources\n");
1485
1486         /* the minimum number of queue pairs equals the number of vports */
1487         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1488         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1489         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1490 }
1491
1492 static int hclge_configure(struct hclge_dev *hdev)
1493 {
1494         struct hclge_cfg cfg;
1495         unsigned int i;
1496         int ret;
1497
1498         ret = hclge_get_cfg(hdev, &cfg);
1499         if (ret)
1500                 return ret;
1501
1502         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1503         hdev->base_tqp_pid = 0;
1504         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1505         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1506         hdev->rx_buf_len = cfg.rx_buf_len;
1507         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1508         hdev->hw.mac.media_type = cfg.media_type;
1509         hdev->hw.mac.phy_addr = cfg.phy_addr;
1510         hdev->num_tx_desc = cfg.tqp_desc_num;
1511         hdev->num_rx_desc = cfg.tqp_desc_num;
1512         hdev->tm_info.num_pg = 1;
1513         hdev->tc_max = cfg.tc_num;
1514         hdev->tm_info.hw_pfc_map = 0;
1515         hdev->wanted_umv_size = cfg.umv_space;
1516
1517         if (hnae3_dev_fd_supported(hdev)) {
1518                 hdev->fd_en = true;
1519                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1520         }
1521
1522         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1523         if (ret) {
1524                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1525                         cfg.default_speed, ret);
1526                 return ret;
1527         }
1528
1529         hclge_parse_link_mode(hdev, cfg.speed_ability);
1530
1531         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1532
1533         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1534             (hdev->tc_max < 1)) {
1535                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1536                          hdev->tc_max);
1537                 hdev->tc_max = 1;
1538         }
1539
1540         /* Dev does not support DCB */
1541         if (!hnae3_dev_dcb_supported(hdev)) {
1542                 hdev->tc_max = 1;
1543                 hdev->pfc_max = 0;
1544         } else {
1545                 hdev->pfc_max = hdev->tc_max;
1546         }
1547
1548         hdev->tm_info.num_tc = 1;
1549
1550         /* Non-contiguous TCs are currently not supported */
1551         for (i = 0; i < hdev->tm_info.num_tc; i++)
1552                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1553
1554         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1555
1556         hclge_init_kdump_kernel_config(hdev);
1557
1558         /* Set the initial affinity based on the PCI function number */
1559         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1560         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1561         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1562                         &hdev->affinity_mask);
1563
1564         return ret;
1565 }
1566
1567 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1568                             u16 tso_mss_max)
1569 {
1570         struct hclge_cfg_tso_status_cmd *req;
1571         struct hclge_desc desc;
1572
1573         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1574
1575         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1576         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1577         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1578
1579         return hclge_cmd_send(&hdev->hw, &desc, 1);
1580 }
1581
1582 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1583 {
1584         struct hclge_cfg_gro_status_cmd *req;
1585         struct hclge_desc desc;
1586         int ret;
1587
1588         if (!hnae3_dev_gro_supported(hdev))
1589                 return 0;
1590
1591         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1592         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1593
1594         req->gro_en = en ? 1 : 0;
1595
1596         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1597         if (ret)
1598                 dev_err(&hdev->pdev->dev,
1599                         "GRO hardware config cmd failed, ret = %d\n", ret);
1600
1601         return ret;
1602 }
1603
1604 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1605 {
1606         struct hclge_tqp *tqp;
1607         int i;
1608
1609         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1610                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1611         if (!hdev->htqp)
1612                 return -ENOMEM;
1613
1614         tqp = hdev->htqp;
1615
1616         for (i = 0; i < hdev->num_tqps; i++) {
1617                 tqp->dev = &hdev->pdev->dev;
1618                 tqp->index = i;
1619
1620                 tqp->q.ae_algo = &ae_algo;
1621                 tqp->q.buf_size = hdev->rx_buf_len;
1622                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1623                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1624
1625                 /* need an extended offset to configure queues >=
1626                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1627                  */
1628                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1629                         tqp->q.io_base = hdev->hw.io_base +
1630                                          HCLGE_TQP_REG_OFFSET +
1631                                          i * HCLGE_TQP_REG_SIZE;
1632                 else
1633                         tqp->q.io_base = hdev->hw.io_base +
1634                                          HCLGE_TQP_REG_OFFSET +
1635                                          HCLGE_TQP_EXT_REG_OFFSET +
1636                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1637                                          HCLGE_TQP_REG_SIZE;
1638
1639                 tqp++;
1640         }
1641
1642         return 0;
1643 }
1644
1645 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1646                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1647 {
1648         struct hclge_tqp_map_cmd *req;
1649         struct hclge_desc desc;
1650         int ret;
1651
1652         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1653
1654         req = (struct hclge_tqp_map_cmd *)desc.data;
1655         req->tqp_id = cpu_to_le16(tqp_pid);
1656         req->tqp_vf = func_id;
1657         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
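        /* the MAP_TYPE bit marks a VF mapping and stays clear for the PF */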
1658         if (!is_pf)
1659                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1660         req->tqp_vid = cpu_to_le16(tqp_vid);
1661
1662         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1663         if (ret)
1664                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1665
1666         return ret;
1667 }
1668
1669 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1670 {
1671         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1672         struct hclge_dev *hdev = vport->back;
1673         int i, alloced;
1674
1675         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1676              alloced < num_tqps; i++) {
1677                 if (!hdev->htqp[i].alloced) {
1678                         hdev->htqp[i].q.handle = &vport->nic;
1679                         hdev->htqp[i].q.tqp_index = alloced;
1680                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1681                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1682                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1683                         hdev->htqp[i].alloced = true;
1684                         alloced++;
1685                 }
1686         }
1687         vport->alloc_tqps = alloced;
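        /* rss_size is bounded by both the PF max RSS size and the number of
         * TQPs available per TC
         */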
1688         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1689                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1690
1691         /* ensure a one-to-one mapping between irq and queue by default */
1692         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1693                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1694
1695         return 0;
1696 }
1697
1698 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1699                             u16 num_tx_desc, u16 num_rx_desc)
1700
1701 {
1702         struct hnae3_handle *nic = &vport->nic;
1703         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1704         struct hclge_dev *hdev = vport->back;
1705         int ret;
1706
1707         kinfo->num_tx_desc = num_tx_desc;
1708         kinfo->num_rx_desc = num_rx_desc;
1709
1710         kinfo->rx_buf_len = hdev->rx_buf_len;
1711
1712         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1713                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1714         if (!kinfo->tqp)
1715                 return -ENOMEM;
1716
1717         ret = hclge_assign_tqp(vport, num_tqps);
1718         if (ret)
1719                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1720
1721         return ret;
1722 }
1723
1724 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1725                                   struct hclge_vport *vport)
1726 {
1727         struct hnae3_handle *nic = &vport->nic;
1728         struct hnae3_knic_private_info *kinfo;
1729         u16 i;
1730
1731         kinfo = &nic->kinfo;
1732         for (i = 0; i < vport->alloc_tqps; i++) {
1733                 struct hclge_tqp *q =
1734                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1735                 bool is_pf;
1736                 int ret;
1737
1738                 is_pf = !(vport->vport_id);
1739                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1740                                              i, is_pf);
1741                 if (ret)
1742                         return ret;
1743         }
1744
1745         return 0;
1746 }
1747
1748 static int hclge_map_tqp(struct hclge_dev *hdev)
1749 {
1750         struct hclge_vport *vport = hdev->vport;
1751         u16 i, num_vport;
1752
1753         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1754         for (i = 0; i < num_vport; i++) {
1755                 int ret;
1756
1757                 ret = hclge_map_tqp_to_vport(hdev, vport);
1758                 if (ret)
1759                         return ret;
1760
1761                 vport++;
1762         }
1763
1764         return 0;
1765 }
1766
1767 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1768 {
1769         struct hnae3_handle *nic = &vport->nic;
1770         struct hclge_dev *hdev = vport->back;
1771         int ret;
1772
1773         nic->pdev = hdev->pdev;
1774         nic->ae_algo = &ae_algo;
1775         nic->numa_node_mask = hdev->numa_node_mask;
1776
1777         ret = hclge_knic_setup(vport, num_tqps,
1778                                hdev->num_tx_desc, hdev->num_rx_desc);
1779         if (ret)
1780                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1781
1782         return ret;
1783 }
1784
1785 static int hclge_alloc_vport(struct hclge_dev *hdev)
1786 {
1787         struct pci_dev *pdev = hdev->pdev;
1788         struct hclge_vport *vport;
1789         u32 tqp_main_vport;
1790         u32 tqp_per_vport;
1791         int num_vport, i;
1792         int ret;
1793
1794         /* We need to alloc a vport for the main NIC of the PF */
1795         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1796
1797         if (hdev->num_tqps < num_vport) {
1798                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1799                         hdev->num_tqps, num_vport);
1800                 return -EINVAL;
1801         }
1802
1803         /* Alloc the same number of TQPs for every vport */
1804         tqp_per_vport = hdev->num_tqps / num_vport;
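        /* the main (PF) vport also takes the remainder of the division */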
1805         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1806
1807         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1808                              GFP_KERNEL);
1809         if (!vport)
1810                 return -ENOMEM;
1811
1812         hdev->vport = vport;
1813         hdev->num_alloc_vport = num_vport;
1814
1815         if (IS_ENABLED(CONFIG_PCI_IOV))
1816                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1817
1818         for (i = 0; i < num_vport; i++) {
1819                 vport->back = hdev;
1820                 vport->vport_id = i;
1821                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1822                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1823                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1824                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1825                 INIT_LIST_HEAD(&vport->vlan_list);
1826                 INIT_LIST_HEAD(&vport->uc_mac_list);
1827                 INIT_LIST_HEAD(&vport->mc_mac_list);
1828                 spin_lock_init(&vport->mac_list_lock);
1829
1830                 if (i == 0)
1831                         ret = hclge_vport_setup(vport, tqp_main_vport);
1832                 else
1833                         ret = hclge_vport_setup(vport, tqp_per_vport);
1834                 if (ret) {
1835                         dev_err(&pdev->dev,
1836                                 "vport setup failed for vport %d, %d\n",
1837                                 i, ret);
1838                         return ret;
1839                 }
1840
1841                 vport++;
1842         }
1843
1844         return 0;
1845 }
1846
1847 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1848                                     struct hclge_pkt_buf_alloc *buf_alloc)
1849 {
1850 /* TX buffer size is in units of 128 bytes */
1851 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1852 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1853         struct hclge_tx_buff_alloc_cmd *req;
1854         struct hclge_desc desc;
1855         int ret;
1856         u8 i;
1857
1858         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1859
1860         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1861         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1862                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1863
1864                 req->tx_pkt_buff[i] =
1865                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1866                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1867         }
1868
1869         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1870         if (ret)
1871                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1872                         ret);
1873
1874         return ret;
1875 }
1876
1877 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1878                                  struct hclge_pkt_buf_alloc *buf_alloc)
1879 {
1880         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1881
1882         if (ret)
1883                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1884
1885         return ret;
1886 }
1887
1888 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1889 {
1890         unsigned int i;
1891         u32 cnt = 0;
1892
1893         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1894                 if (hdev->hw_tc_map & BIT(i))
1895                         cnt++;
1896         return cnt;
1897 }
1898
1899 /* Get the number of PFC-enabled TCs that have a private buffer */
1900 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1901                                   struct hclge_pkt_buf_alloc *buf_alloc)
1902 {
1903         struct hclge_priv_buf *priv;
1904         unsigned int i;
1905         int cnt = 0;
1906
1907         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1908                 priv = &buf_alloc->priv_buf[i];
1909                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1910                     priv->enable)
1911                         cnt++;
1912         }
1913
1914         return cnt;
1915 }
1916
1917 /* Get the number of PFC-disabled TCs that have a private buffer */
1918 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1919                                      struct hclge_pkt_buf_alloc *buf_alloc)
1920 {
1921         struct hclge_priv_buf *priv;
1922         unsigned int i;
1923         int cnt = 0;
1924
1925         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1926                 priv = &buf_alloc->priv_buf[i];
1927                 if (hdev->hw_tc_map & BIT(i) &&
1928                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1929                     priv->enable)
1930                         cnt++;
1931         }
1932
1933         return cnt;
1934 }
1935
1936 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1937 {
1938         struct hclge_priv_buf *priv;
1939         u32 rx_priv = 0;
1940         int i;
1941
1942         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1943                 priv = &buf_alloc->priv_buf[i];
1944                 if (priv->enable)
1945                         rx_priv += priv->buf_size;
1946         }
1947         return rx_priv;
1948 }
1949
1950 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1951 {
1952         u32 i, total_tx_size = 0;
1953
1954         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1955                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1956
1957         return total_tx_size;
1958 }
1959
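/* Check whether the remaining rx packet buffer (rx_all) can hold the private
 * buffers of all enabled TCs plus a shared buffer; if it can, carve out the
 * shared buffer and set its overall and per-TC high/low waterlines.
 */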
1960 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1961                                 struct hclge_pkt_buf_alloc *buf_alloc,
1962                                 u32 rx_all)
1963 {
1964         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1965         u32 tc_num = hclge_get_tc_num(hdev);
1966         u32 shared_buf, aligned_mps;
1967         u32 rx_priv;
1968         int i;
1969
1970         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1971
1972         if (hnae3_dev_dcb_supported(hdev))
1973                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1974                                         hdev->dv_buf_size;
1975         else
1976                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1977                                         + hdev->dv_buf_size;
1978
1979         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1980         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1981                              HCLGE_BUF_SIZE_UNIT);
1982
1983         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1984         if (rx_all < rx_priv + shared_std)
1985                 return false;
1986
1987         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1988         buf_alloc->s_buf.buf_size = shared_buf;
1989         if (hnae3_dev_dcb_supported(hdev)) {
1990                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1991                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1992                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1993                                   HCLGE_BUF_SIZE_UNIT);
1994         } else {
1995                 buf_alloc->s_buf.self.high = aligned_mps +
1996                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1997                 buf_alloc->s_buf.self.low = aligned_mps;
1998         }
1999
2000         if (hnae3_dev_dcb_supported(hdev)) {
2001                 hi_thrd = shared_buf - hdev->dv_buf_size;
2002
2003                 if (tc_num <= NEED_RESERVE_TC_NUM)
2004                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2005                                         / BUF_MAX_PERCENT;
2006
2007                 if (tc_num)
2008                         hi_thrd = hi_thrd / tc_num;
2009
2010                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2011                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2012                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2013         } else {
2014                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2015                 lo_thrd = aligned_mps;
2016         }
2017
2018         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2019                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2020                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2021         }
2022
2023         return true;
2024 }
2025
2026 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2027                                 struct hclge_pkt_buf_alloc *buf_alloc)
2028 {
2029         u32 i, total_size;
2030
2031         total_size = hdev->pkt_buf_size;
2032
2033         /* alloc tx buffer for all enabled TCs */
2034         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2035                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2036
2037                 if (hdev->hw_tc_map & BIT(i)) {
2038                         if (total_size < hdev->tx_buf_size)
2039                                 return -ENOMEM;
2040
2041                         priv->tx_buf_size = hdev->tx_buf_size;
2042                 } else {
2043                         priv->tx_buf_size = 0;
2044                 }
2045
2046                 total_size -= priv->tx_buf_size;
2047         }
2048
2049         return 0;
2050 }
2051
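/* Assign a private buffer and waterlines to every enabled TC, using either
 * the larger (max == true) or the smaller waterline scheme, then check
 * whether the shared buffer still fits in what is left.
 */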
2052 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2053                                   struct hclge_pkt_buf_alloc *buf_alloc)
2054 {
2055         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2056         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2057         unsigned int i;
2058
2059         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2060                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2061
2062                 priv->enable = 0;
2063                 priv->wl.low = 0;
2064                 priv->wl.high = 0;
2065                 priv->buf_size = 0;
2066
2067                 if (!(hdev->hw_tc_map & BIT(i)))
2068                         continue;
2069
2070                 priv->enable = 1;
2071
2072                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2073                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2074                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2075                                                 HCLGE_BUF_SIZE_UNIT);
2076                 } else {
2077                         priv->wl.low = 0;
2078                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2079                                         aligned_mps;
2080                 }
2081
2082                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2083         }
2084
2085         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2086 }
2087
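/* Drop the private buffers of TCs without PFC, starting from the highest TC,
 * until the shared buffer fits or no such TC is left.
 */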
2088 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2089                                           struct hclge_pkt_buf_alloc *buf_alloc)
2090 {
2091         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2092         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2093         int i;
2094
2095         /* clear the highest-numbered TC first */
2096         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2097                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2098                 unsigned int mask = BIT((unsigned int)i);
2099
2100                 if (hdev->hw_tc_map & mask &&
2101                     !(hdev->tm_info.hw_pfc_map & mask)) {
2102                         /* Clear the private buffer of this non-PFC TC */
2103                         priv->wl.low = 0;
2104                         priv->wl.high = 0;
2105                         priv->buf_size = 0;
2106                         priv->enable = 0;
2107                         no_pfc_priv_num--;
2108                 }
2109
2110                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2111                     no_pfc_priv_num == 0)
2112                         break;
2113         }
2114
2115         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2116 }
2117
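/* Drop the private buffers of PFC-enabled TCs, starting from the highest TC,
 * until the shared buffer fits or no such TC is left.
 */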
2118 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2119                                         struct hclge_pkt_buf_alloc *buf_alloc)
2120 {
2121         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2122         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2123         int i;
2124
2125         /* clear the highest-numbered TC first */
2126         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2127                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2128                 unsigned int mask = BIT((unsigned int)i);
2129
2130                 if (hdev->hw_tc_map & mask &&
2131                     hdev->tm_info.hw_pfc_map & mask) {
2132                         /* Reduce the number of PFC TCs with a private buffer */
2133                         priv->wl.low = 0;
2134                         priv->enable = 0;
2135                         priv->wl.high = 0;
2136                         priv->buf_size = 0;
2137                         pfc_priv_num--;
2138                 }
2139
2140                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2141                     pfc_priv_num == 0)
2142                         break;
2143         }
2144
2145         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2146 }
2147
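/* Try to split the whole rx packet buffer into private buffers for the
 * enabled TCs with no shared buffer at all; succeed only if every enabled TC
 * gets at least the required minimum private buffer.
 */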
2148 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2149                                       struct hclge_pkt_buf_alloc *buf_alloc)
2150 {
2151 #define COMPENSATE_BUFFER       0x3C00
2152 #define COMPENSATE_HALF_MPS_NUM 5
2153 #define PRIV_WL_GAP             0x1800
2154
2155         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2156         u32 tc_num = hclge_get_tc_num(hdev);
2157         u32 half_mps = hdev->mps >> 1;
2158         u32 min_rx_priv;
2159         unsigned int i;
2160
2161         if (tc_num)
2162                 rx_priv = rx_priv / tc_num;
2163
2164         if (tc_num <= NEED_RESERVE_TC_NUM)
2165                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2166
2167         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2168                         COMPENSATE_HALF_MPS_NUM * half_mps;
2169         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2170         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2171
2172         if (rx_priv < min_rx_priv)
2173                 return false;
2174
2175         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2176                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2177
2178                 priv->enable = 0;
2179                 priv->wl.low = 0;
2180                 priv->wl.high = 0;
2181                 priv->buf_size = 0;
2182
2183                 if (!(hdev->hw_tc_map & BIT(i)))
2184                         continue;
2185
2186                 priv->enable = 1;
2187                 priv->buf_size = rx_priv;
2188                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2189                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2190         }
2191
2192         buf_alloc->s_buf.buf_size = 0;
2193
2194         return true;
2195 }
2196
2197 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2198  * @hdev: pointer to struct hclge_dev
2199  * @buf_alloc: pointer to buffer calculation data
2200  * @return: 0: calculation successful, negative: fail
2201  */
2202 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2203                                 struct hclge_pkt_buf_alloc *buf_alloc)
2204 {
2205         /* When DCB is not supported, rx private buffer is not allocated. */
2206         if (!hnae3_dev_dcb_supported(hdev)) {
2207                 u32 rx_all = hdev->pkt_buf_size;
2208
2209                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2210                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2211                         return -ENOMEM;
2212
2213                 return 0;
2214         }
2215
2216         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2217                 return 0;
2218
2219         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2220                 return 0;
2221
2222         /* try to decrease the buffer size */
2223         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2224                 return 0;
2225
2226         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2227                 return 0;
2228
2229         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2230                 return 0;
2231
2232         return -ENOMEM;
2233 }
2234
2235 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2236                                    struct hclge_pkt_buf_alloc *buf_alloc)
2237 {
2238         struct hclge_rx_priv_buff_cmd *req;
2239         struct hclge_desc desc;
2240         int ret;
2241         int i;
2242
2243         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2244         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2245
2246         /* Alloc private buffer for each TC */
2247         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2248                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2249
2250                 req->buf_num[i] =
2251                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2252                 req->buf_num[i] |=
2253                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2254         }
2255
2256         req->shared_buf =
2257                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2258                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2259
2260         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2261         if (ret)
2262                 dev_err(&hdev->pdev->dev,
2263                         "rx private buffer alloc cmd failed %d\n", ret);
2264
2265         return ret;
2266 }
2267
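/* Program the per-TC private buffer waterlines into hardware; values are
 * shifted right by HCLGE_BUF_UNIT_S before being written and each descriptor
 * covers HCLGE_TC_NUM_ONE_DESC TCs.
 */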
2268 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2269                                    struct hclge_pkt_buf_alloc *buf_alloc)
2270 {
2271         struct hclge_rx_priv_wl_buf *req;
2272         struct hclge_priv_buf *priv;
2273         struct hclge_desc desc[2];
2274         int i, j;
2275         int ret;
2276
2277         for (i = 0; i < 2; i++) {
2278                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2279                                            false);
2280                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2281
2282                 /* The first descriptor sets the NEXT bit to 1 */
2283                 if (i == 0)
2284                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2285                 else
2286                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2287
2288                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2289                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2290
2291                         priv = &buf_alloc->priv_buf[idx];
2292                         req->tc_wl[j].high =
2293                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2294                         req->tc_wl[j].high |=
2295                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2296                         req->tc_wl[j].low =
2297                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2298                         req->tc_wl[j].low |=
2299                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2300                 }
2301         }
2302
2303         /* Send 2 descriptors at one time */
2304         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2305         if (ret)
2306                 dev_err(&hdev->pdev->dev,
2307                         "rx private waterline config cmd failed %d\n",
2308                         ret);
2309         return ret;
2310 }
2311
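/* Program the per-TC thresholds of the shared buffer, using the same
 * two-descriptor layout as the private waterline command.
 */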
2312 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2313                                     struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2316         struct hclge_rx_com_thrd *req;
2317         struct hclge_desc desc[2];
2318         struct hclge_tc_thrd *tc;
2319         int i, j;
2320         int ret;
2321
2322         for (i = 0; i < 2; i++) {
2323                 hclge_cmd_setup_basic_desc(&desc[i],
2324                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2325                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2326
2327                 /* The first descriptor sets the NEXT bit to 1 */
2328                 if (i == 0)
2329                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330                 else
2331                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2332
2333                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2334                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2335
2336                         req->com_thrd[j].high =
2337                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2338                         req->com_thrd[j].high |=
2339                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340                         req->com_thrd[j].low =
2341                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2342                         req->com_thrd[j].low |=
2343                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344                 }
2345         }
2346
2347         /* Send 2 descriptors at one time */
2348         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349         if (ret)
2350                 dev_err(&hdev->pdev->dev,
2351                         "common threshold config cmd failed %d\n", ret);
2352         return ret;
2353 }
2354
2355 static int hclge_common_wl_config(struct hclge_dev *hdev,
2356                                   struct hclge_pkt_buf_alloc *buf_alloc)
2357 {
2358         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2359         struct hclge_rx_com_wl *req;
2360         struct hclge_desc desc;
2361         int ret;
2362
2363         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2364
2365         req = (struct hclge_rx_com_wl *)desc.data;
2366         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2367         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368
2369         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2370         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2371
2372         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2373         if (ret)
2374                 dev_err(&hdev->pdev->dev,
2375                         "common waterline config cmd failed %d\n", ret);
2376
2377         return ret;
2378 }
2379
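/* Allocate and program the whole packet buffer: tx buffers first, then the
 * rx private buffers, and finally the shared buffer waterlines.
 *
 * Typical usage (as in hclge_mac_init() below):
 *
 *      ret = hclge_buffer_alloc(hdev);
 *      if (ret)
 *              dev_err(&hdev->pdev->dev, "allocate buffer fail, ret=%d\n", ret);
 */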
2380 int hclge_buffer_alloc(struct hclge_dev *hdev)
2381 {
2382         struct hclge_pkt_buf_alloc *pkt_buf;
2383         int ret;
2384
2385         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2386         if (!pkt_buf)
2387                 return -ENOMEM;
2388
2389         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "could not calc tx buffer size for all TCs %d\n", ret);
2393                 goto out;
2394         }
2395
2396         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2397         if (ret) {
2398                 dev_err(&hdev->pdev->dev,
2399                         "could not alloc tx buffers %d\n", ret);
2400                 goto out;
2401         }
2402
2403         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2404         if (ret) {
2405                 dev_err(&hdev->pdev->dev,
2406                         "could not calc rx priv buffer size for all TCs %d\n",
2407                         ret);
2408                 goto out;
2409         }
2410
2411         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2412         if (ret) {
2413                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2414                         ret);
2415                 goto out;
2416         }
2417
2418         if (hnae3_dev_dcb_supported(hdev)) {
2419                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2420                 if (ret) {
2421                         dev_err(&hdev->pdev->dev,
2422                                 "could not configure rx private waterline %d\n",
2423                                 ret);
2424                         goto out;
2425                 }
2426
2427                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2428                 if (ret) {
2429                         dev_err(&hdev->pdev->dev,
2430                                 "could not configure common threshold %d\n",
2431                                 ret);
2432                         goto out;
2433                 }
2434         }
2435
2436         ret = hclge_common_wl_config(hdev, pkt_buf);
2437         if (ret)
2438                 dev_err(&hdev->pdev->dev,
2439                         "could not configure common waterline %d\n", ret);
2440
2441 out:
2442         kfree(pkt_buf);
2443         return ret;
2444 }
2445
2446 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2447 {
2448         struct hnae3_handle *roce = &vport->roce;
2449         struct hnae3_handle *nic = &vport->nic;
2450         struct hclge_dev *hdev = vport->back;
2451
2452         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2453
2454         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2455                 return -EINVAL;
2456
2457         roce->rinfo.base_vector = hdev->roce_base_vector;
2458
2459         roce->rinfo.netdev = nic->kinfo.netdev;
2460         roce->rinfo.roce_io_base = hdev->hw.io_base;
2461         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2462
2463         roce->pdev = nic->pdev;
2464         roce->ae_algo = nic->ae_algo;
2465         roce->numa_node_mask = nic->numa_node_mask;
2466
2467         return 0;
2468 }
2469
2470 static int hclge_init_msi(struct hclge_dev *hdev)
2471 {
2472         struct pci_dev *pdev = hdev->pdev;
2473         int vectors;
2474         int i;
2475
2476         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2477                                         hdev->num_msi,
2478                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2479         if (vectors < 0) {
2480                 dev_err(&pdev->dev,
2481                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2482                         vectors);
2483                 return vectors;
2484         }
2485         if (vectors < hdev->num_msi)
2486                 dev_warn(&hdev->pdev->dev,
2487                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2488                          hdev->num_msi, vectors);
2489
2490         hdev->num_msi = vectors;
2491         hdev->num_msi_left = vectors;
2492
2493         hdev->base_msi_vector = pdev->irq;
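        /* RoCE vectors start right after the NIC vectors */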
2494         hdev->roce_base_vector = hdev->base_msi_vector +
2495                                 hdev->num_nic_msi;
2496
2497         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2498                                            sizeof(u16), GFP_KERNEL);
2499         if (!hdev->vector_status) {
2500                 pci_free_irq_vectors(pdev);
2501                 return -ENOMEM;
2502         }
2503
2504         for (i = 0; i < hdev->num_msi; i++)
2505                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2506
2507         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2508                                         sizeof(int), GFP_KERNEL);
2509         if (!hdev->vector_irq) {
2510                 pci_free_irq_vectors(pdev);
2511                 return -ENOMEM;
2512         }
2513
2514         return 0;
2515 }
2516
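/* Only 10M and 100M links can run half duplex; force full duplex otherwise */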
2517 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2518 {
2519         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2520                 duplex = HCLGE_MAC_FULL;
2521
2522         return duplex;
2523 }
2524
2525 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2526                                       u8 duplex)
2527 {
2528         struct hclge_config_mac_speed_dup_cmd *req;
2529         struct hclge_desc desc;
2530         int ret;
2531
2532         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2533
2534         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2535
2536         if (duplex)
2537                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2538
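        /* map the MAC speed onto the encoding used by the speed/duplex command */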
2539         switch (speed) {
2540         case HCLGE_MAC_SPEED_10M:
2541                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542                                 HCLGE_CFG_SPEED_S, 6);
2543                 break;
2544         case HCLGE_MAC_SPEED_100M:
2545                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546                                 HCLGE_CFG_SPEED_S, 7);
2547                 break;
2548         case HCLGE_MAC_SPEED_1G:
2549                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550                                 HCLGE_CFG_SPEED_S, 0);
2551                 break;
2552         case HCLGE_MAC_SPEED_10G:
2553                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554                                 HCLGE_CFG_SPEED_S, 1);
2555                 break;
2556         case HCLGE_MAC_SPEED_25G:
2557                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558                                 HCLGE_CFG_SPEED_S, 2);
2559                 break;
2560         case HCLGE_MAC_SPEED_40G:
2561                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562                                 HCLGE_CFG_SPEED_S, 3);
2563                 break;
2564         case HCLGE_MAC_SPEED_50G:
2565                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566                                 HCLGE_CFG_SPEED_S, 4);
2567                 break;
2568         case HCLGE_MAC_SPEED_100G:
2569                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570                                 HCLGE_CFG_SPEED_S, 5);
2571                 break;
2572         case HCLGE_MAC_SPEED_200G:
2573                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574                                 HCLGE_CFG_SPEED_S, 8);
2575                 break;
2576         default:
2577                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2578                 return -EINVAL;
2579         }
2580
2581         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2582                       1);
2583
2584         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2585         if (ret) {
2586                 dev_err(&hdev->pdev->dev,
2587                         "mac speed/duplex config cmd failed %d.\n", ret);
2588                 return ret;
2589         }
2590
2591         return 0;
2592 }
2593
2594 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2595 {
2596         struct hclge_mac *mac = &hdev->hw.mac;
2597         int ret;
2598
2599         duplex = hclge_check_speed_dup(duplex, speed);
2600         if (!mac->support_autoneg && mac->speed == speed &&
2601             mac->duplex == duplex)
2602                 return 0;
2603
2604         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2605         if (ret)
2606                 return ret;
2607
2608         hdev->hw.mac.speed = speed;
2609         hdev->hw.mac.duplex = duplex;
2610
2611         return 0;
2612 }
2613
2614 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2615                                      u8 duplex)
2616 {
2617         struct hclge_vport *vport = hclge_get_vport(handle);
2618         struct hclge_dev *hdev = vport->back;
2619
2620         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2621 }
2622
2623 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2624 {
2625         struct hclge_config_auto_neg_cmd *req;
2626         struct hclge_desc desc;
2627         u32 flag = 0;
2628         int ret;
2629
2630         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2631
2632         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2633         if (enable)
2634                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2635         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2636
2637         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2638         if (ret)
2639                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2640                         ret);
2641
2642         return ret;
2643 }
2644
2645 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2646 {
2647         struct hclge_vport *vport = hclge_get_vport(handle);
2648         struct hclge_dev *hdev = vport->back;
2649
2650         if (!hdev->hw.mac.support_autoneg) {
2651                 if (enable) {
2652                         dev_err(&hdev->pdev->dev,
2653                                 "autoneg is not supported by current port\n");
2654                         return -EOPNOTSUPP;
2655                 } else {
2656                         return 0;
2657                 }
2658         }
2659
2660         return hclge_set_autoneg_en(hdev, enable);
2661 }
2662
2663 static int hclge_get_autoneg(struct hnae3_handle *handle)
2664 {
2665         struct hclge_vport *vport = hclge_get_vport(handle);
2666         struct hclge_dev *hdev = vport->back;
2667         struct phy_device *phydev = hdev->hw.mac.phydev;
2668
2669         if (phydev)
2670                 return phydev->autoneg;
2671
2672         return hdev->hw.mac.autoneg;
2673 }
2674
2675 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2676 {
2677         struct hclge_vport *vport = hclge_get_vport(handle);
2678         struct hclge_dev *hdev = vport->back;
2679         int ret;
2680
2681         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2682
2683         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2684         if (ret)
2685                 return ret;
2686         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2687 }
2688
2689 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2690 {
2691         struct hclge_vport *vport = hclge_get_vport(handle);
2692         struct hclge_dev *hdev = vport->back;
2693
2694         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2695                 return hclge_set_autoneg_en(hdev, !halt);
2696
2697         return 0;
2698 }
2699
2700 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2701 {
2702         struct hclge_config_fec_cmd *req;
2703         struct hclge_desc desc;
2704         int ret;
2705
2706         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2707
2708         req = (struct hclge_config_fec_cmd *)desc.data;
2709         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2710                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2711         if (fec_mode & BIT(HNAE3_FEC_RS))
2712                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2713                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2714         if (fec_mode & BIT(HNAE3_FEC_BASER))
2715                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2716                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2717
2718         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2719         if (ret)
2720                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2721
2722         return ret;
2723 }
2724
2725 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2726 {
2727         struct hclge_vport *vport = hclge_get_vport(handle);
2728         struct hclge_dev *hdev = vport->back;
2729         struct hclge_mac *mac = &hdev->hw.mac;
2730         int ret;
2731
2732         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2733                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2734                 return -EINVAL;
2735         }
2736
2737         ret = hclge_set_fec_hw(hdev, fec_mode);
2738         if (ret)
2739                 return ret;
2740
2741         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2742         return 0;
2743 }
2744
2745 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2746                           u8 *fec_mode)
2747 {
2748         struct hclge_vport *vport = hclge_get_vport(handle);
2749         struct hclge_dev *hdev = vport->back;
2750         struct hclge_mac *mac = &hdev->hw.mac;
2751
2752         if (fec_ability)
2753                 *fec_ability = mac->fec_ability;
2754         if (fec_mode)
2755                 *fec_mode = mac->fec_mode;
2756 }
2757
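/* Apply the current software MAC state to hardware: speed/duplex, autoneg
 * (when supported by the port), any user-defined FEC mode, the MAC MTU, the
 * default loopback setting and the packet buffer allocation.
 */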
2758 static int hclge_mac_init(struct hclge_dev *hdev)
2759 {
2760         struct hclge_mac *mac = &hdev->hw.mac;
2761         int ret;
2762
2763         hdev->support_sfp_query = true;
2764         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2765         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2766                                          hdev->hw.mac.duplex);
2767         if (ret)
2768                 return ret;
2769
2770         if (hdev->hw.mac.support_autoneg) {
2771                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2772                 if (ret)
2773                         return ret;
2774         }
2775
2776         mac->link = 0;
2777
2778         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2779                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2780                 if (ret)
2781                         return ret;
2782         }
2783
2784         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2785         if (ret) {
2786                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2787                 return ret;
2788         }
2789
2790         ret = hclge_set_default_loopback(hdev);
2791         if (ret)
2792                 return ret;
2793
2794         ret = hclge_buffer_alloc(hdev);
2795         if (ret)
2796                 dev_err(&hdev->pdev->dev,
2797                         "allocate buffer fail, ret=%d\n", ret);
2798
2799         return ret;
2800 }
2801
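/* The helpers below queue the shared service task on the first CPU of the
 * misc vector's affinity mask. The mailbox and reset variants use a
 * dedicated *_SERVICE_SCHED state bit so the same request is not queued
 * twice; the generic hclge_task_schedule() is skipped while a reset has
 * failed. None of them schedule work once the device is being removed.
 */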
2802 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2803 {
2804         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2806                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807                                     hclge_wq, &hdev->service_task, 0);
2808 }
2809
2810 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2811 {
2812         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2813             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2814                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2815                                     hclge_wq, &hdev->service_task, 0);
2816 }
2817
2818 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2819 {
2820         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2821             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2822                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2823                                     hclge_wq, &hdev->service_task,
2824                                     delay_time);
2825 }
2826
2827 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2828 {
2829         struct hclge_link_status_cmd *req;
2830         struct hclge_desc desc;
2831         int ret;
2832
2833         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2834         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2835         if (ret) {
2836                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2837                         ret);
2838                 return ret;
2839         }
2840
2841         req = (struct hclge_link_status_cmd *)desc.data;
2842         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2843                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2844
2845         return 0;
2846 }
2847
2848 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2849 {
2850         struct phy_device *phydev = hdev->hw.mac.phydev;
2851
2852         *link_status = HCLGE_LINK_STATUS_DOWN;
2853
2854         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2855                 return 0;
2856
2857         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2858                 return 0;
2859
2860         return hclge_get_mac_link_status(hdev, link_status);
2861 }
2862
2863 static void hclge_update_link_status(struct hclge_dev *hdev)
2864 {
2865         struct hnae3_client *rclient = hdev->roce_client;
2866         struct hnae3_client *client = hdev->nic_client;
2867         struct hnae3_handle *rhandle;
2868         struct hnae3_handle *handle;
2869         int state;
2870         int ret;
2871         int i;
2872
2873         if (!client)
2874                 return;
2875
2876         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2877                 return;
2878
2879         ret = hclge_get_mac_phy_link(hdev, &state);
2880         if (ret) {
2881                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2882                 return;
2883         }
2884
2885         if (state != hdev->hw.mac.link) {
2886                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2887                         handle = &hdev->vport[i].nic;
2888                         client->ops->link_status_change(handle, state);
2889                         hclge_config_mac_tnl_int(hdev, state);
2890                         rhandle = &hdev->vport[i].roce;
2891                         if (rclient && rclient->ops->link_status_change)
2892                                 rclient->ops->link_status_change(rhandle,
2893                                                                  state);
2894                 }
2895                 hdev->hw.mac.link = state;
2896         }
2897
2898         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2899 }
2900
2901 static void hclge_update_port_capability(struct hclge_dev *hdev,
2902                                          struct hclge_mac *mac)
2903 {
2904         if (hnae3_dev_fec_supported(hdev))
2905                 /* update fec ability by speed */
2906                 hclge_convert_setting_fec(mac);
2907
2908         /* firmware cannot identify the backplane type, the media type
2909          * read from the configuration can help deal with it
2910          */
2911         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2912             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2913                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2914         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2915                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2916
2917         if (mac->support_autoneg) {
2918                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2919                 linkmode_copy(mac->advertising, mac->supported);
2920         } else {
2921                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2922                                    mac->supported);
2923                 linkmode_zero(mac->advertising);
2924         }
2925 }
2926
2927 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2928 {
2929         struct hclge_sfp_info_cmd *resp;
2930         struct hclge_desc desc;
2931         int ret;
2932
2933         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2934         resp = (struct hclge_sfp_info_cmd *)desc.data;
2935         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2936         if (ret == -EOPNOTSUPP) {
2937                 dev_warn(&hdev->pdev->dev,
2938                          "IMP does not support get SFP speed %d\n", ret);
2939                 return ret;
2940         } else if (ret) {
2941                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2942                 return ret;
2943         }
2944
2945         *speed = le32_to_cpu(resp->speed);
2946
2947         return 0;
2948 }
2949
2950 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2951 {
2952         struct hclge_sfp_info_cmd *resp;
2953         struct hclge_desc desc;
2954         int ret;
2955
2956         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2957         resp = (struct hclge_sfp_info_cmd *)desc.data;
2958
2959         resp->query_type = QUERY_ACTIVE_SPEED;
2960
2961         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2962         if (ret == -EOPNOTSUPP) {
2963                 dev_warn(&hdev->pdev->dev,
2964                          "IMP does not support get SFP info %d\n", ret);
2965                 return ret;
2966         } else if (ret) {
2967                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2968                 return ret;
2969         }
2970
2971         /* In some cases, the mac speed got from IMP may be 0, it shouldn't
2972          * be set to mac->speed.
2973          */
2974         if (!le32_to_cpu(resp->speed))
2975                 return 0;
2976
2977         mac->speed = le32_to_cpu(resp->speed);
2978         /* if resp->speed_ability is 0, it means the firmware is an old
2979          * version, so do not update these params
2980          */
2981         if (resp->speed_ability) {
2982                 mac->module_type = le32_to_cpu(resp->module_type);
2983                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2984                 mac->autoneg = resp->autoneg;
2985                 mac->support_autoneg = resp->autoneg_ability;
2986                 mac->speed_type = QUERY_ACTIVE_SPEED;
2987                 if (!resp->active_fec)
2988                         mac->fec_mode = 0;
2989                 else
2990                         mac->fec_mode = BIT(resp->active_fec);
2991         } else {
2992                 mac->speed_type = QUERY_SFP_SPEED;
2993         }
2994
2995         return 0;
2996 }
2997
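/* Read the PHY link ksettings from firmware using a two-descriptor query
 * (used when the PHY is managed by the IMP, see hclge_update_tp_port_info()
 * below) and convert the legacy u32 link mode masks into ethtool linkmode
 * bitmaps.
 */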
2998 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2999                                         struct ethtool_link_ksettings *cmd)
3000 {
3001         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3002         struct hclge_vport *vport = hclge_get_vport(handle);
3003         struct hclge_phy_link_ksetting_0_cmd *req0;
3004         struct hclge_phy_link_ksetting_1_cmd *req1;
3005         u32 supported, advertising, lp_advertising;
3006         struct hclge_dev *hdev = vport->back;
3007         int ret;
3008
3009         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3010                                    true);
3011         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3012         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3013                                    true);
3014
3015         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3016         if (ret) {
3017                 dev_err(&hdev->pdev->dev,
3018                         "failed to get phy link ksetting, ret = %d.\n", ret);
3019                 return ret;
3020         }
3021
3022         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3023         cmd->base.autoneg = req0->autoneg;
3024         cmd->base.speed = le32_to_cpu(req0->speed);
3025         cmd->base.duplex = req0->duplex;
3026         cmd->base.port = req0->port;
3027         cmd->base.transceiver = req0->transceiver;
3028         cmd->base.phy_address = req0->phy_address;
3029         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3030         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3031         supported = le32_to_cpu(req0->supported);
3032         advertising = le32_to_cpu(req0->advertising);
3033         lp_advertising = le32_to_cpu(req0->lp_advertising);
3034         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3035                                                 supported);
3036         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3037                                                 advertising);
3038         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3039                                                 lp_advertising);
3040
3041         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3042         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3043         cmd->base.master_slave_state = req1->master_slave_state;
3044
3045         return 0;
3046 }
3047
3048 static int
3049 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3050                              const struct ethtool_link_ksettings *cmd)
3051 {
3052         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3053         struct hclge_vport *vport = hclge_get_vport(handle);
3054         struct hclge_phy_link_ksetting_0_cmd *req0;
3055         struct hclge_phy_link_ksetting_1_cmd *req1;
3056         struct hclge_dev *hdev = vport->back;
3057         u32 advertising;
3058         int ret;
3059
3060         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3061             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3062              (cmd->base.duplex != DUPLEX_HALF &&
3063               cmd->base.duplex != DUPLEX_FULL)))
3064                 return -EINVAL;
3065
3066         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3067                                    false);
3068         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3069         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3070                                    false);
3071
3072         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3073         req0->autoneg = cmd->base.autoneg;
3074         req0->speed = cpu_to_le32(cmd->base.speed);
3075         req0->duplex = cmd->base.duplex;
3076         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3077                                                 cmd->link_modes.advertising);
3078         req0->advertising = cpu_to_le32(advertising);
3079         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3080
3081         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3082         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3083
3084         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3085         if (ret) {
3086                 dev_err(&hdev->pdev->dev,
3087                         "failed to set phy link ksettings, ret = %d.\n", ret);
3088                 return ret;
3089         }
3090
3091         hdev->hw.mac.autoneg = cmd->base.autoneg;
3092         hdev->hw.mac.speed = cmd->base.speed;
3093         hdev->hw.mac.duplex = cmd->base.duplex;
3094         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3095
3096         return 0;
3097 }
3098
3099 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3100 {
3101         struct ethtool_link_ksettings cmd;
3102         int ret;
3103
3104         if (!hnae3_dev_phy_imp_supported(hdev))
3105                 return 0;
3106
3107         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3108         if (ret)
3109                 return ret;
3110
3111         hdev->hw.mac.autoneg = cmd.base.autoneg;
3112         hdev->hw.mac.speed = cmd.base.speed;
3113         hdev->hw.mac.duplex = cmd.base.duplex;
3114
3115         return 0;
3116 }
3117
3118 static int hclge_tp_port_init(struct hclge_dev *hdev)
3119 {
3120         struct ethtool_link_ksettings cmd;
3121
3122         if (!hnae3_dev_phy_imp_supported(hdev))
3123                 return 0;
3124
3125         cmd.base.autoneg = hdev->hw.mac.autoneg;
3126         cmd.base.speed = hdev->hw.mac.speed;
3127         cmd.base.duplex = hdev->hw.mac.duplex;
3128         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3129
3130         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3131 }
3132
3133 static int hclge_update_port_info(struct hclge_dev *hdev)
3134 {
3135         struct hclge_mac *mac = &hdev->hw.mac;
3136         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3137         int ret;
3138
3139         /* get the port info from SFP cmd if not copper port */
3140         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3141                 return hclge_update_tp_port_info(hdev);
3142
3143         /* if IMP does not support getting SFP/qSFP info, return directly */
3144         if (!hdev->support_sfp_query)
3145                 return 0;
3146
3147         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3148                 ret = hclge_get_sfp_info(hdev, mac);
3149         else
3150                 ret = hclge_get_sfp_speed(hdev, &speed);
3151
3152         if (ret == -EOPNOTSUPP) {
3153                 hdev->support_sfp_query = false;
3154                 return ret;
3155         } else if (ret) {
3156                 return ret;
3157         }
3158
3159         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3160                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3161                         hclge_update_port_capability(hdev, mac);
3162                         return 0;
3163                 }
3164                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3165                                                HCLGE_MAC_FULL);
3166         } else {
3167                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3168                         return 0; /* do nothing if no SFP */
3169
3170                 /* must config full duplex for SFP */
3171                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3172         }
3173 }
3174
3175 static int hclge_get_status(struct hnae3_handle *handle)
3176 {
3177         struct hclge_vport *vport = hclge_get_vport(handle);
3178         struct hclge_dev *hdev = vport->back;
3179
3180         hclge_update_link_status(hdev);
3181
3182         return hdev->hw.mac.link;
3183 }
3184
3185 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3186 {
3187         if (!pci_num_vf(hdev->pdev)) {
3188                 dev_err(&hdev->pdev->dev,
3189                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3190                 return NULL;
3191         }
3192
3193         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3194                 dev_err(&hdev->pdev->dev,
3195                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3196                         vf, pci_num_vf(hdev->pdev));
3197                 return NULL;
3198         }
3199
3200         /* VFs start from 1 in vport */
3201         vf += HCLGE_VF_VPORT_START_NUM;
3202         return &hdev->vport[vf];
3203 }
3204
3205 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3206                                struct ifla_vf_info *ivf)
3207 {
3208         struct hclge_vport *vport = hclge_get_vport(handle);
3209         struct hclge_dev *hdev = vport->back;
3210
3211         vport = hclge_get_vf_vport(hdev, vf);
3212         if (!vport)
3213                 return -EINVAL;
3214
3215         ivf->vf = vf;
3216         ivf->linkstate = vport->vf_info.link_state;
3217         ivf->spoofchk = vport->vf_info.spoofchk;
3218         ivf->trusted = vport->vf_info.trusted;
3219         ivf->min_tx_rate = 0;
3220         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3221         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3222         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3223         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3224         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3225
3226         return 0;
3227 }
3228
3229 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3230                                    int link_state)
3231 {
3232         struct hclge_vport *vport = hclge_get_vport(handle);
3233         struct hclge_dev *hdev = vport->back;
3234
3235         vport = hclge_get_vf_vport(hdev, vf);
3236         if (!vport)
3237                 return -EINVAL;
3238
3239         vport->vf_info.link_state = link_state;
3240
3241         return 0;
3242 }
3243
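/* Decode the vector0 interrupt cause. Sources are checked in priority
 * order: IMP reset, global reset, MSI-X (hardware error), mailbox (CMDQ RX)
 * and finally "other"; only the highest-priority pending event is returned
 * together with the bits the caller should clear.
 */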
3244 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3245 {
3246         u32 cmdq_src_reg, msix_src_reg;
3247
3248         /* fetch the events from their corresponding regs */
3249         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3250         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3251
3252         /* Assumption: if by any chance reset and mailbox events are reported
3253          * together, then we will only process the reset event in this pass and
3254          * will defer the processing of the mailbox events. Since we would not
3255          * have cleared the RX CMDQ event this time, we would receive another
3256          * interrupt from the H/W just for the mailbox.
3257          *
3258          * check for vector0 reset event sources
3259          */
3260         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3261                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3262                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3263                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3264                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3265                 hdev->rst_stats.imp_rst_cnt++;
3266                 return HCLGE_VECTOR0_EVENT_RST;
3267         }
3268
3269         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3270                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3271                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3272                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3273                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3274                 hdev->rst_stats.global_rst_cnt++;
3275                 return HCLGE_VECTOR0_EVENT_RST;
3276         }
3277
3278         /* check for vector0 msix event source */
3279         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3280                 *clearval = msix_src_reg;
3281                 return HCLGE_VECTOR0_EVENT_ERR;
3282         }
3283
3284         /* check for vector0 mailbox(=CMDQ RX) event source */
3285         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3286                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3287                 *clearval = cmdq_src_reg;
3288                 return HCLGE_VECTOR0_EVENT_MBX;
3289         }
3290
3291         /* print other vector0 event source */
3292         dev_info(&hdev->pdev->dev,
3293                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3294                  cmdq_src_reg, msix_src_reg);
3295         *clearval = msix_src_reg;
3296
3297         return HCLGE_VECTOR0_EVENT_OTHER;
3298 }
3299
3300 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3301                                     u32 regclr)
3302 {
3303         switch (event_type) {
3304         case HCLGE_VECTOR0_EVENT_RST:
3305                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3306                 break;
3307         case HCLGE_VECTOR0_EVENT_MBX:
3308                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3309                 break;
3310         default:
3311                 break;
3312         }
3313 }
3314
3315 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3316 {
3317         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3318                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3319                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3320                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3321         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3322 }
3323
3324 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3325 {
3326         writel(enable ? 1 : 0, vector->addr);
3327 }
3328
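/* Misc (vector0) interrupt handler: mask the vector, decode the cause,
 * schedule the reset or mailbox task accordingly, clear the cause and
 * re-enable the vector when it is safe to do so.
 */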
3329 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3330 {
3331         struct hclge_dev *hdev = data;
3332         u32 clearval = 0;
3333         u32 event_cause;
3334
3335         hclge_enable_vector(&hdev->misc_vector, false);
3336         event_cause = hclge_check_event_cause(hdev, &clearval);
3337
3338         /* vector 0 interrupt is shared with reset and mailbox source events. */
3339         switch (event_cause) {
3340         case HCLGE_VECTOR0_EVENT_ERR:
3341                 /* we do not know what type of reset is required now. This could
3342                  * only be decided after we fetch the type of errors which
3343                  * caused this event. Therefore, we will do below for now:
3344                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3345                  *    have deferred the type of reset to be used.
3346                  * 2. Schedule the reset service task.
3347                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3348                  *    will fetch the correct type of reset.  This would be done
3349                  *    by first decoding the types of errors.
3350                  */
3351                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3352                 fallthrough;
3353         case HCLGE_VECTOR0_EVENT_RST:
3354                 hclge_reset_task_schedule(hdev);
3355                 break;
3356         case HCLGE_VECTOR0_EVENT_MBX:
3357                 /* If we are here then,
3358                  * 1. Either we are not handling any mbx task and we are not
3359                  *    scheduled either,
3360                  *                        OR
3361                  * 2. We could be handling an mbx task but nothing more is
3362                  *    scheduled.
3363                  * In both cases, we should schedule mbx task as there are more
3364                  * mbx messages reported by this interrupt.
3365                  */
3366                 hclge_mbx_task_schedule(hdev);
3367                 break;
3368         default:
3369                 dev_warn(&hdev->pdev->dev,
3370                          "received unknown or unhandled event of vector0\n");
3371                 break;
3372         }
3373
3374         hclge_clear_event_cause(hdev, event_cause, clearval);
3375
3376         /* Enable the interrupt if it is not caused by reset. And when
3377          * clearval is equal to 0, it means the interrupt status may have been
3378          * cleared by hardware before the driver reads the status register.
3379          * In this case, the vector0 interrupt should also be enabled.
3380          */
3381         if (!clearval ||
3382             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3383                 hclge_enable_vector(&hdev->misc_vector, true);
3384         }
3385
3386         return IRQ_HANDLED;
3387 }
3388
3389 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3390 {
3391         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3392                 dev_warn(&hdev->pdev->dev,
3393                          "vector(vector_id %d) has been freed.\n", vector_id);
3394                 return;
3395         }
3396
3397         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3398         hdev->num_msi_left += 1;
3399         hdev->num_msi_used -= 1;
3400 }
3401
3402 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3403 {
3404         struct hclge_misc_vector *vector = &hdev->misc_vector;
3405
3406         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3407
3408         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3409         hdev->vector_status[0] = 0;
3410
3411         hdev->num_msi_left -= 1;
3412         hdev->num_msi_used += 1;
3413 }
3414
3415 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3416                                       const cpumask_t *mask)
3417 {
3418         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3419                                               affinity_notify);
3420
3421         cpumask_copy(&hdev->affinity_mask, mask);
3422 }
3423
3424 static void hclge_irq_affinity_release(struct kref *ref)
3425 {
3426 }
3427
3428 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3429 {
3430         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3431                               &hdev->affinity_mask);
3432
3433         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3434         hdev->affinity_notify.release = hclge_irq_affinity_release;
3435         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3436                                   &hdev->affinity_notify);
3437 }
3438
3439 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3440 {
3441         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3442         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3443 }
3444
3445 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3446 {
3447         int ret;
3448
3449         hclge_get_misc_vector(hdev);
3450
3451         /* this would be explicitly freed in the end */
3452         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3453                  HCLGE_NAME, pci_name(hdev->pdev));
3454         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3455                           0, hdev->misc_vector.name, hdev);
3456         if (ret) {
3457                 hclge_free_vector(hdev, 0);
3458                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3459                         hdev->misc_vector.vector_irq);
3460         }
3461
3462         return ret;
3463 }
3464
3465 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3466 {
3467         free_irq(hdev->misc_vector.vector_irq, hdev);
3468         hclge_free_vector(hdev, 0);
3469 }
3470
3471 int hclge_notify_client(struct hclge_dev *hdev,
3472                         enum hnae3_reset_notify_type type)
3473 {
3474         struct hnae3_client *client = hdev->nic_client;
3475         u16 i;
3476
3477         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3478                 return 0;
3479
3480         if (!client->ops->reset_notify)
3481                 return -EOPNOTSUPP;
3482
3483         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3484                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3485                 int ret;
3486
3487                 ret = client->ops->reset_notify(handle, type);
3488                 if (ret) {
3489                         dev_err(&hdev->pdev->dev,
3490                                 "notify nic client failed %d(%d)\n", type, ret);
3491                         return ret;
3492                 }
3493         }
3494
3495         return 0;
3496 }
3497
3498 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3499                                     enum hnae3_reset_notify_type type)
3500 {
3501         struct hnae3_client *client = hdev->roce_client;
3502         int ret;
3503         u16 i;
3504
3505         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3506                 return 0;
3507
3508         if (!client->ops->reset_notify)
3509                 return -EOPNOTSUPP;
3510
3511         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3512                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3513
3514                 ret = client->ops->reset_notify(handle, type);
3515                 if (ret) {
3516                         dev_err(&hdev->pdev->dev,
3517                                 "notify roce client failed %d(%d)",
3518                                 type, ret);
3519                         return ret;
3520                 }
3521         }
3522
3523         return ret;
3524 }
3525
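/* Poll the status register matching the current reset type until its
 * "reset in progress" bit clears, checking every 100 ms for up to 350
 * iterations before giving up with -EBUSY.
 */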
3526 static int hclge_reset_wait(struct hclge_dev *hdev)
3527 {
3528 #define HCLGE_RESET_WAIT_MS     100
3529 #define HCLGE_RESET_WAIT_CNT    350
3530
3531         u32 val, reg, reg_bit;
3532         u32 cnt = 0;
3533
3534         switch (hdev->reset_type) {
3535         case HNAE3_IMP_RESET:
3536                 reg = HCLGE_GLOBAL_RESET_REG;
3537                 reg_bit = HCLGE_IMP_RESET_BIT;
3538                 break;
3539         case HNAE3_GLOBAL_RESET:
3540                 reg = HCLGE_GLOBAL_RESET_REG;
3541                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3542                 break;
3543         case HNAE3_FUNC_RESET:
3544                 reg = HCLGE_FUN_RST_ING;
3545                 reg_bit = HCLGE_FUN_RST_ING_B;
3546                 break;
3547         default:
3548                 dev_err(&hdev->pdev->dev,
3549                         "Wait for unsupported reset type: %d\n",
3550                         hdev->reset_type);
3551                 return -EINVAL;
3552         }
3553
3554         val = hclge_read_dev(&hdev->hw, reg);
3555         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3556                 msleep(HCLGE_RESET_WAIT_MS);
3557                 val = hclge_read_dev(&hdev->hw, reg);
3558                 cnt++;
3559         }
3560
3561         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3562                 dev_warn(&hdev->pdev->dev,
3563                          "Wait for reset timeout: %d\n", hdev->reset_type);
3564                 return -EBUSY;
3565         }
3566
3567         return 0;
3568 }
3569
3570 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3571 {
3572         struct hclge_vf_rst_cmd *req;
3573         struct hclge_desc desc;
3574
3575         req = (struct hclge_vf_rst_cmd *)desc.data;
3576         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3577         req->dest_vfid = func_id;
3578
3579         if (reset)
3580                 req->vf_rst = 0x1;
3581
3582         return hclge_cmd_send(&hdev->hw, &desc, 1);
3583 }
3584
3585 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3586 {
3587         int i;
3588
3589         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3590                 struct hclge_vport *vport = &hdev->vport[i];
3591                 int ret;
3592
3593                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3594                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3595                 if (ret) {
3596                         dev_err(&hdev->pdev->dev,
3597                                 "set vf(%u) rst failed %d!\n",
3598                                 vport->vport_id, ret);
3599                         return ret;
3600                 }
3601
3602                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3603                         continue;
3604
3605                 /* Inform VF to process the reset.
3606                  * hclge_inform_reset_assert_to_vf may fail if VF
3607                  * driver is not loaded.
3608                  */
3609                 ret = hclge_inform_reset_assert_to_vf(vport);
3610                 if (ret)
3611                         dev_warn(&hdev->pdev->dev,
3612                                  "inform reset to vf(%u) failed %d!\n",
3613                                  vport->vport_id, ret);
3614         }
3615
3616         return 0;
3617 }
3618
3619 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3620 {
3621         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3622             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3623             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3624                 return;
3625
3626         hclge_mbx_handler(hdev);
3627
3628         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3629 }
3630
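/* Before asserting a PF or FLR reset, poll the firmware until every VF
 * reports it is ready to be reset, servicing the mailbox in between so the
 * VFs can bring their netdevs down. Old firmware that does not support the
 * query gets a fixed HCLGE_RESET_SYNC_TIME delay instead.
 */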
3631 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3632 {
3633         struct hclge_pf_rst_sync_cmd *req;
3634         struct hclge_desc desc;
3635         int cnt = 0;
3636         int ret;
3637
3638         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3639         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3640
3641         do {
3642                 /* vf needs to down its netdev by mbx during PF or FLR reset */
3643                 hclge_mailbox_service_task(hdev);
3644
3645                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3646                 /* for compatibility with old firmware, wait
3647                  * 100 ms for the VF to stop IO
3648                  */
3649                 if (ret == -EOPNOTSUPP) {
3650                         msleep(HCLGE_RESET_SYNC_TIME);
3651                         return;
3652                 } else if (ret) {
3653                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3654                                  ret);
3655                         return;
3656                 } else if (req->all_vf_ready) {
3657                         return;
3658                 }
3659                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3660                 hclge_cmd_reuse_desc(&desc, true);
3661         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3662
3663         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3664 }
3665
3666 void hclge_report_hw_error(struct hclge_dev *hdev,
3667                            enum hnae3_hw_error_type type)
3668 {
3669         struct hnae3_client *client = hdev->nic_client;
3670         u16 i;
3671
3672         if (!client || !client->ops->process_hw_error ||
3673             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3674                 return;
3675
3676         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3677                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3678 }
3679
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3681 {
3682         u32 reg_val;
3683
3684         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3689         }
3690
3691         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3695         }
3696 }
3697
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3699 {
3700         struct hclge_desc desc;
3701         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3702         int ret;
3703
3704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706         req->fun_reset_vfid = func_id;
3707
3708         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709         if (ret)
3710                 dev_err(&hdev->pdev->dev,
3711                         "send function reset cmd fail, status =%d\n", ret);
3712
3713         return ret;
3714 }
3715
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3717 {
3718         struct hnae3_handle *handle = &hdev->vport[0].nic;
3719         struct pci_dev *pdev = hdev->pdev;
3720         u32 val;
3721
3722         if (hclge_get_hw_reset_stat(handle)) {
3723                 dev_info(&pdev->dev, "hardware reset not finished\n");
3724                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3727                 return;
3728         }
3729
3730         switch (hdev->reset_type) {
3731         case HNAE3_GLOBAL_RESET:
3732                 dev_info(&pdev->dev, "global reset requested\n");
3733                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3736                 break;
3737         case HNAE3_FUNC_RESET:
3738                 dev_info(&pdev->dev, "PF reset requested\n");
3739                 /* schedule again to check later */
3740                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741                 hclge_reset_task_schedule(hdev);
3742                 break;
3743         default:
3744                 dev_warn(&pdev->dev,
3745                          "unsupported reset type: %d\n", hdev->reset_type);
3746                 break;
3747         }
3748 }
3749
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751                                                    unsigned long *addr)
3752 {
3753         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754         struct hclge_dev *hdev = ae_dev->priv;
3755
3756         /* first, resolve any unknown reset type to the known type(s) */
3757         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759                                         HCLGE_MISC_VECTOR_INT_STS);
3760                 /* we will intentionally ignore any errors from this function
3761                  *  as we will end up in *some* reset request in any case
3762                  */
3763                 if (hclge_handle_hw_msix_error(hdev, addr))
3764                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3765                                  msix_sts_reg);
3766
3767                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768                 /* We deferred the clearing of the error event which caused
3769                  * the interrupt since it was not possible to do that in
3770                  * interrupt context (and this is the reason we introduced the
3771                  * new UNKNOWN reset type). Now that the errors have been
3772                  * handled and cleared in hardware, we can safely enable
3773                  * interrupts. This is an exception to the norm.
3774                  */
3775                 hclge_enable_vector(&hdev->misc_vector, true);
3776         }
3777
3778         /* return the highest priority reset level amongst all */
3779         if (test_bit(HNAE3_IMP_RESET, addr)) {
3780                 rst_level = HNAE3_IMP_RESET;
3781                 clear_bit(HNAE3_IMP_RESET, addr);
3782                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783                 clear_bit(HNAE3_FUNC_RESET, addr);
3784         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785                 rst_level = HNAE3_GLOBAL_RESET;
3786                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787                 clear_bit(HNAE3_FUNC_RESET, addr);
3788         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789                 rst_level = HNAE3_FUNC_RESET;
3790                 clear_bit(HNAE3_FUNC_RESET, addr);
3791         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792                 rst_level = HNAE3_FLR_RESET;
3793                 clear_bit(HNAE3_FLR_RESET, addr);
3794         }
3795
3796         if (hdev->reset_type != HNAE3_NONE_RESET &&
3797             rst_level < hdev->reset_type)
3798                 return HNAE3_NONE_RESET;
3799
3800         return rst_level;
3801 }
3802
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3804 {
3805         u32 clearval = 0;
3806
3807         switch (hdev->reset_type) {
3808         case HNAE3_IMP_RESET:
3809                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3810                 break;
3811         case HNAE3_GLOBAL_RESET:
3812                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3813                 break;
3814         default:
3815                 break;
3816         }
3817
3818         if (!clearval)
3819                 return;
3820
3821         /* For revision 0x20, the reset interrupt source
3822          * can only be cleared after the hardware reset is done
3823          */
3824         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3826                                 clearval);
3827
3828         hclge_enable_vector(&hdev->misc_vector, true);
3829 }
3830
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3832 {
3833         u32 reg_val;
3834
3835         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3836         if (enable)
3837                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3838         else
3839                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3840
3841         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3842 }
3843
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3845 {
3846         int ret;
3847
3848         ret = hclge_set_all_vf_rst(hdev, true);
3849         if (ret)
3850                 return ret;
3851
3852         hclge_func_reset_sync_vf(hdev);
3853
3854         return 0;
3855 }
3856
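/* Per-type preparation before waiting for the hardware reset: a function
 * reset notifies the VFs and asserts the reset through firmware, an FLR
 * only notifies the VFs, and an IMP reset handles IMP errors and
 * acknowledges the IMP reset interrupt. In all cases the driver then sleeps
 * briefly and raises the handshake bit to tell hardware that the
 * preparatory work is done.
 */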
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3858 {
3859         u32 reg_val;
3860         int ret = 0;
3861
3862         switch (hdev->reset_type) {
3863         case HNAE3_FUNC_RESET:
3864                 ret = hclge_func_reset_notify_vf(hdev);
3865                 if (ret)
3866                         return ret;
3867
3868                 ret = hclge_func_reset_cmd(hdev, 0);
3869                 if (ret) {
3870                         dev_err(&hdev->pdev->dev,
3871                                 "asserting function reset fail %d!\n", ret);
3872                         return ret;
3873                 }
3874
3875                 /* After performing PF reset, it is not necessary to do the
3876                  * mailbox handling or send any command to firmware, because
3877                  * any mailbox handling or command to firmware is only valid
3878                  * after hclge_cmd_init is called.
3879                  */
3880                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881                 hdev->rst_stats.pf_rst_cnt++;
3882                 break;
3883         case HNAE3_FLR_RESET:
3884                 ret = hclge_func_reset_notify_vf(hdev);
3885                 if (ret)
3886                         return ret;
3887                 break;
3888         case HNAE3_IMP_RESET:
3889                 hclge_handle_imp_error(hdev);
3890                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3893                 break;
3894         default:
3895                 break;
3896         }
3897
3898         /* inform hardware that preparatory work is done */
3899         msleep(HCLGE_RESET_SYNC_TIME);
3900         hclge_reset_handshake(hdev, true);
3901         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3902
3903         return ret;
3904 }
3905
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3907 {
3908 #define MAX_RESET_FAIL_CNT 5
3909
3910         if (hdev->reset_pending) {
3911                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912                          hdev->reset_pending);
3913                 return true;
3914         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915                    HCLGE_RESET_INT_M) {
3916                 dev_info(&hdev->pdev->dev,
3917                          "reset failed because of new reset interrupt\n");
3918                 hclge_clear_reset_cause(hdev);
3919                 return false;
3920         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921                 hdev->rst_stats.reset_fail_cnt++;
3922                 set_bit(hdev->reset_type, &hdev->reset_pending);
3923                 dev_info(&hdev->pdev->dev,
3924                          "re-schedule reset task(%u)\n",
3925                          hdev->rst_stats.reset_fail_cnt);
3926                 return true;
3927         }
3928
3929         hclge_clear_reset_cause(hdev);
3930
3931         /* recover the handshake status when reset fails */
3932         hclge_reset_handshake(hdev, true);
3933
3934         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3935
3936         hclge_dbg_dump_rst_info(hdev);
3937
3938         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3939
3940         return false;
3941 }
3942
3943 static int hclge_set_rst_done(struct hclge_dev *hdev)
3944 {
3945         struct hclge_pf_rst_done_cmd *req;
3946         struct hclge_desc desc;
3947         int ret;
3948
3949         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3950         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3951         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3952
3953         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3954         /* To be compatible with the old firmware, which does not support
3955          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3956          * return success
3957          */
3958         if (ret == -EOPNOTSUPP) {
3959                 dev_warn(&hdev->pdev->dev,
3960                          "current firmware does not support command(0x%x)!\n",
3961                          HCLGE_OPC_PF_RST_DONE);
3962                 return 0;
3963         } else if (ret) {
3964                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3965                         ret);
3966         }
3967
3968         return ret;
3969 }
3970
3971 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3972 {
3973         int ret = 0;
3974
3975         switch (hdev->reset_type) {
3976         case HNAE3_FUNC_RESET:
3977         case HNAE3_FLR_RESET:
3978                 ret = hclge_set_all_vf_rst(hdev, false);
3979                 break;
3980         case HNAE3_GLOBAL_RESET:
3981         case HNAE3_IMP_RESET:
3982                 ret = hclge_set_rst_done(hdev);
3983                 break;
3984         default:
3985                 break;
3986         }
3987
3988         /* clear up the handshake status after re-initialization is done */
3989         hclge_reset_handshake(hdev, false);
3990
3991         return ret;
3992 }
3993
3994 static int hclge_reset_stack(struct hclge_dev *hdev)
3995 {
3996         int ret;
3997
3998         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3999         if (ret)
4000                 return ret;
4001
4002         ret = hclge_reset_ae_dev(hdev->ae_dev);
4003         if (ret)
4004                 return ret;
4005
4006         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4007 }
4008
4009 static int hclge_reset_prepare(struct hclge_dev *hdev)
4010 {
4011         int ret;
4012
4013         hdev->rst_stats.reset_cnt++;
4014         /* perform reset of the stack & ae device for a client */
4015         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4016         if (ret)
4017                 return ret;
4018
4019         rtnl_lock();
4020         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4021         rtnl_unlock();
4022         if (ret)
4023                 return ret;
4024
4025         return hclge_reset_prepare_wait(hdev);
4026 }
4027
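/* Rebuild after the hardware reset has completed: uninitialize the RoCE
 * client, reset the ae device and NIC stack under the rtnl lock, clear the
 * reset cause, bring the clients back up and finally check whether a
 * higher-level default reset request still needs to be honored.
 */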
4028 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4029 {
4030         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031         enum hnae3_reset_type reset_level;
4032         int ret;
4033
4034         hdev->rst_stats.hw_reset_done_cnt++;
4035
4036         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4037         if (ret)
4038                 return ret;
4039
4040         rtnl_lock();
4041         ret = hclge_reset_stack(hdev);
4042         rtnl_unlock();
4043         if (ret)
4044                 return ret;
4045
4046         hclge_clear_reset_cause(hdev);
4047
4048         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4049         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4050          * times
4051          */
4052         if (ret &&
4053             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4054                 return ret;
4055
4056         ret = hclge_reset_prepare_up(hdev);
4057         if (ret)
4058                 return ret;
4059
4060         rtnl_lock();
4061         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4062         rtnl_unlock();
4063         if (ret)
4064                 return ret;
4065
4066         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4067         if (ret)
4068                 return ret;
4069
4070         hdev->last_reset_time = jiffies;
4071         hdev->rst_stats.reset_fail_cnt = 0;
4072         hdev->rst_stats.reset_done_cnt++;
4073         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4074
4075         /* if default_reset_request has a higher level reset request,
4076          * it should be handled as soon as possible, since some errors
4077          * need this kind of reset to be fixed.
4078          */
4079         reset_level = hclge_get_reset_level(ae_dev,
4080                                             &hdev->default_reset_request);
4081         if (reset_level != HNAE3_NONE_RESET)
4082                 set_bit(reset_level, &hdev->reset_request);
4083
4084         return 0;
4085 }
4086
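/* Top-level reset flow: prepare (notify clients down and do the per-type
 * preparation), wait for the hardware to finish, then rebuild. If any step
 * fails, the error handler decides whether the reset task should be
 * re-scheduled.
 */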
4087 static void hclge_reset(struct hclge_dev *hdev)
4088 {
4089         if (hclge_reset_prepare(hdev))
4090                 goto err_reset;
4091
4092         if (hclge_reset_wait(hdev))
4093                 goto err_reset;
4094
4095         if (hclge_reset_rebuild(hdev))
4096                 goto err_reset;
4097
4098         return;
4099
4100 err_reset:
4101         if (hclge_reset_err_handle(hdev))
4102                 hclge_reset_task_schedule(hdev);
4103 }
4104
4105 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4106 {
4107         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4108         struct hclge_dev *hdev = ae_dev->priv;
4109
4110         /* We might end up getting called broadly because of the two cases below:
4111          * 1. A recoverable error was conveyed through APEI and the only way to
4112          *    bring back normalcy is to reset.
4113          * 2. A new reset request from the stack due to timeout
4114          *
4115          * For the first case, the error event might not have an ae handle
4116          * available. Check if this is a new reset request and we are not here
4117          * just because the last reset attempt did not succeed and the watchdog
4118          * hit us again. We will know this if the last reset request did not
4119          * occur very recently (watchdog timer = 5*HZ, let us check after a
4120          * sufficiently large time, say 4*5*HZ).
4121          * In case of a new request we reset the "reset level" to PF reset.
4122          * And if it is a repeat reset request of the most recent one then we
4123          * want to throttle the reset request; we will not allow it again before 3*HZ.
4124          */
4125         if (!handle)
4126                 handle = &hdev->vport[0].nic;
4127
4128         if (time_before(jiffies, (hdev->last_reset_time +
4129                                   HCLGE_RESET_INTERVAL))) {
4130                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4131                 return;
4132         } else if (hdev->default_reset_request) {
4133                 hdev->reset_level =
4134                         hclge_get_reset_level(ae_dev,
4135                                               &hdev->default_reset_request);
4136         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4137                 hdev->reset_level = HNAE3_FUNC_RESET;
4138         }
4139
4140         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4141                  hdev->reset_level);
4142
4143         /* request reset & schedule reset task */
4144         set_bit(hdev->reset_level, &hdev->reset_request);
4145         hclge_reset_task_schedule(hdev);
4146
4147         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4148                 hdev->reset_level++;
4149 }
4150
4151 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4152                                         enum hnae3_reset_type rst_type)
4153 {
4154         struct hclge_dev *hdev = ae_dev->priv;
4155
4156         set_bit(rst_type, &hdev->default_reset_request);
4157 }
4158
4159 static void hclge_reset_timer(struct timer_list *t)
4160 {
4161         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4162
4163         /* if default_reset_request has no value, it means that this reset
4164          * request has already been handled, so just return here
4165          */
4166         if (!hdev->default_reset_request)
4167                 return;
4168
4169         dev_info(&hdev->pdev->dev,
4170                  "triggering reset in reset timer\n");
4171         hclge_reset_event(hdev->pdev, NULL);
4172 }
4173
4174 static void hclge_reset_subtask(struct hclge_dev *hdev)
4175 {
4176         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4177
4178         /* Check if there is any ongoing reset in the hardware. This status can
4179          * be checked from reset_pending. If there is, we need to wait for the
4180          * hardware to complete the reset.
4181          *    a. If we are able to figure out in a reasonable time that the
4182          *       hardware has fully reset, we can proceed with the driver and
4183          *       client reset.
4184          *    b. Else, we can come back later to check this status, so
4185          *       reschedule now.
4186          */
4187         hdev->last_reset_time = jiffies;
4188         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4189         if (hdev->reset_type != HNAE3_NONE_RESET)
4190                 hclge_reset(hdev);
4191
4192         /* check if we got any *new* reset requests to be honored */
4193         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4194         if (hdev->reset_type != HNAE3_NONE_RESET)
4195                 hclge_do_reset(hdev);
4196
4197         hdev->reset_type = HNAE3_NONE_RESET;
4198 }
4199
4200 static void hclge_reset_service_task(struct hclge_dev *hdev)
4201 {
4202         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4203                 return;
4204
4205         down(&hdev->reset_sem);
4206         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4207
4208         hclge_reset_subtask(hdev);
4209
4210         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4211         up(&hdev->reset_sem);
4212 }
4213
4214 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4215 {
4216         int i;
4217
4218         /* start from vport 1, since the PF (vport 0) is always alive */
4219         for (i = 1; i < hdev->num_alloc_vport; i++) {
4220                 struct hclge_vport *vport = &hdev->vport[i];
4221
4222                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4223                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4224
4225                 /* If vf is not alive, set to default value */
4226                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4227                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4228         }
4229 }
4230
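/* Periodic work: link state, MAC table and promisc mode are refreshed on
 * every invocation; the remaining housekeeping (vport alive tracking,
 * statistics, port info, vlan filter and expired aRFS rules) is rate
 * limited to roughly once per second before the task reschedules itself.
 */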
4231 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4232 {
4233         unsigned long delta = round_jiffies_relative(HZ);
4234
4235         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4236                 return;
4237
4238         /* Always handle link status updating to make sure the link state
4239          * is updated when it is triggered by mbx.
4240          */
4241         hclge_update_link_status(hdev);
4242         hclge_sync_mac_table(hdev);
4243         hclge_sync_promisc_mode(hdev);
4244
4245         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4246                 delta = jiffies - hdev->last_serv_processed;
4247
4248                 if (delta < round_jiffies_relative(HZ)) {
4249                         delta = round_jiffies_relative(HZ) - delta;
4250                         goto out;
4251                 }
4252         }
4253
4254         hdev->serv_processed_cnt++;
4255         hclge_update_vport_alive(hdev);
4256
4257         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4258                 hdev->last_serv_processed = jiffies;
4259                 goto out;
4260         }
4261
4262         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4263                 hclge_update_stats_for_all(hdev);
4264
4265         hclge_update_port_info(hdev);
4266         hclge_sync_vlan_filter(hdev);
4267
4268         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4269                 hclge_rfs_filter_expire(hdev);
4270
4271         hdev->last_serv_processed = jiffies;
4272
4273 out:
4274         hclge_task_schedule(hdev, delta);
4275 }
4276
4277 static void hclge_service_task(struct work_struct *work)
4278 {
4279         struct hclge_dev *hdev =
4280                 container_of(work, struct hclge_dev, service_task.work);
4281
4282         hclge_reset_service_task(hdev);
4283         hclge_mailbox_service_task(hdev);
4284         hclge_periodic_service_task(hdev);
4285
4286         /* Handle reset and mbx again in case periodical task delays the
4287          * handling by calling hclge_task_schedule() in
4288          * hclge_periodic_service_task().
4289          */
4290         hclge_reset_service_task(hdev);
4291         hclge_mailbox_service_task(hdev);
4292 }
4293
4294 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4295 {
4296         /* VF handle has no client */
4297         if (!handle->client)
4298                 return container_of(handle, struct hclge_vport, nic);
4299         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4300                 return container_of(handle, struct hclge_vport, roce);
4301         else
4302                 return container_of(handle, struct hclge_vport, nic);
4303 }
4304
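/* Fill in the IRQ number and the io address of the interrupt control
 * register for the vector at the given index. When (idx - 1) is below
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 the base register region is used;
 * otherwise the extended region is addressed with a high/low offset
 * pair derived from the index.
 */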
4305 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4306                                   struct hnae3_vector_info *vector_info)
4307 {
4308 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4309
4310         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4311
4312         /* need an extended offset to configure vectors >= 64 */
4313         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4314                 vector_info->io_addr = hdev->hw.io_base +
4315                                 HCLGE_VECTOR_REG_BASE +
4316                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4317         else
4318                 vector_info->io_addr = hdev->hw.io_base +
4319                                 HCLGE_VECTOR_EXT_REG_BASE +
4320                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4321                                 HCLGE_VECTOR_REG_OFFSET_H +
4322                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4323                                 HCLGE_VECTOR_REG_OFFSET;
4324
4325         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4326         hdev->vector_irq[idx] = vector_info->vector;
4327 }
4328
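/* Allocate up to vector_num currently unused MSI vectors for the caller,
 * bounded by the NIC vectors and the vectors still left on the device.
 * Returns the number of vectors actually filled into vector_info.
 */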
4329 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4330                             struct hnae3_vector_info *vector_info)
4331 {
4332         struct hclge_vport *vport = hclge_get_vport(handle);
4333         struct hnae3_vector_info *vector = vector_info;
4334         struct hclge_dev *hdev = vport->back;
4335         int alloc = 0;
4336         u16 i = 0;
4337         u16 j;
4338
4339         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4340         vector_num = min(hdev->num_msi_left, vector_num);
4341
4342         for (j = 0; j < vector_num; j++) {
4343                 while (++i < hdev->num_nic_msi) {
4344                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4345                                 hclge_get_vector_info(hdev, i, vector);
4346                                 vector++;
4347                                 alloc++;
4348
4349                                 break;
4350                         }
4351                 }
4352         }
4353         hdev->num_msi_left -= alloc;
4354         hdev->num_msi_used += alloc;
4355
4356         return alloc;
4357 }
4358
4359 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4360 {
4361         int i;
4362
4363         for (i = 0; i < hdev->num_msi; i++)
4364                 if (vector == hdev->vector_irq[i])
4365                         return i;
4366
4367         return -EINVAL;
4368 }
4369
4370 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4371 {
4372         struct hclge_vport *vport = hclge_get_vport(handle);
4373         struct hclge_dev *hdev = vport->back;
4374         int vector_id;
4375
4376         vector_id = hclge_get_vector_index(hdev, vector);
4377         if (vector_id < 0) {
4378                 dev_err(&hdev->pdev->dev,
4379                         "Get vector index fail. vector = %d\n", vector);
4380                 return vector_id;
4381         }
4382
4383         hclge_free_vector(hdev, vector_id);
4384
4385         return 0;
4386 }
4387
4388 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4389 {
4390         return HCLGE_RSS_KEY_SIZE;
4391 }
4392
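/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk, with the chunk offset carried in the hash_config field.
 */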
4393 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4394                                   const u8 hfunc, const u8 *key)
4395 {
4396         struct hclge_rss_config_cmd *req;
4397         unsigned int key_offset = 0;
4398         struct hclge_desc desc;
4399         int key_counts;
4400         int key_size;
4401         int ret;
4402
4403         key_counts = HCLGE_RSS_KEY_SIZE;
4404         req = (struct hclge_rss_config_cmd *)desc.data;
4405
4406         while (key_counts) {
4407                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4408                                            false);
4409
4410                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4411                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4412
4413                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4414                 memcpy(req->hash_key,
4415                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4416
4417                 key_counts -= key_size;
4418                 key_offset++;
4419                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4420                 if (ret) {
4421                         dev_err(&hdev->pdev->dev,
4422                                 "Configure RSS config fail, status = %d\n",
4423                                 ret);
4424                         return ret;
4425                 }
4426         }
4427         return 0;
4428 }
4429
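/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor. The low byte of each queue id goes
 * into rss_qid_l and the next most significant bit is packed into
 * rss_qid_h.
 */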
4430 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4431 {
4432         struct hclge_rss_indirection_table_cmd *req;
4433         struct hclge_desc desc;
4434         int rss_cfg_tbl_num;
4435         u8 rss_msb_oft;
4436         u8 rss_msb_val;
4437         int ret;
4438         u16 qid;
4439         int i;
4440         u32 j;
4441
4442         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4443         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4444                           HCLGE_RSS_CFG_TBL_SIZE;
4445
4446         for (i = 0; i < rss_cfg_tbl_num; i++) {
4447                 hclge_cmd_setup_basic_desc
4448                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4449
4450                 req->start_table_index =
4451                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4452                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4453                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4454                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4455                         req->rss_qid_l[j] = qid & 0xff;
4456                         rss_msb_oft =
4457                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4458                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4459                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4460                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4461                 }
4462                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4463                 if (ret) {
4464                         dev_err(&hdev->pdev->dev,
4465                                 "Configure rss indir table fail,status = %d\n",
4466                                 ret);
4467                         return ret;
4468                 }
4469         }
4470         return 0;
4471 }
4472
4473 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4474                                  u16 *tc_size, u16 *tc_offset)
4475 {
4476         struct hclge_rss_tc_mode_cmd *req;
4477         struct hclge_desc desc;
4478         int ret;
4479         int i;
4480
4481         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4482         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4483
4484         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4485                 u16 mode = 0;
4486
4487                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4488                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4489                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4490                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4491                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4492                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4493                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4494
4495                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4496         }
4497
4498         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4499         if (ret)
4500                 dev_err(&hdev->pdev->dev,
4501                         "Configure rss tc mode fail, status = %d\n", ret);
4502
4503         return ret;
4504 }
4505
4506 static void hclge_get_rss_type(struct hclge_vport *vport)
4507 {
4508         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4509             vport->rss_tuple_sets.ipv4_udp_en ||
4510             vport->rss_tuple_sets.ipv4_sctp_en ||
4511             vport->rss_tuple_sets.ipv6_tcp_en ||
4512             vport->rss_tuple_sets.ipv6_udp_en ||
4513             vport->rss_tuple_sets.ipv6_sctp_en)
4514                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4515         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4516                  vport->rss_tuple_sets.ipv6_fragment_en)
4517                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4518         else
4519                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4520 }
4521
4522 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4523 {
4524         struct hclge_rss_input_tuple_cmd *req;
4525         struct hclge_desc desc;
4526         int ret;
4527
4528         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4529
4530         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4531
4532         /* Get the tuple cfg from pf */
4533         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4534         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4535         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4536         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4537         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4538         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4539         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4540         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4541         hclge_get_rss_type(&hdev->vport[0]);
4542         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4543         if (ret)
4544                 dev_err(&hdev->pdev->dev,
4545                         "Configure rss input fail, status = %d\n", ret);
4546         return ret;
4547 }
4548
4549 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4550                          u8 *key, u8 *hfunc)
4551 {
4552         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4553         struct hclge_vport *vport = hclge_get_vport(handle);
4554         int i;
4555
4556         /* Get hash algorithm */
4557         if (hfunc) {
4558                 switch (vport->rss_algo) {
4559                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4560                         *hfunc = ETH_RSS_HASH_TOP;
4561                         break;
4562                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4563                         *hfunc = ETH_RSS_HASH_XOR;
4564                         break;
4565                 default:
4566                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4567                         break;
4568                 }
4569         }
4570
4571         /* Get the RSS Key required by the user */
4572         if (key)
4573                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4574
4575         /* Get indirect table */
4576         if (indir)
4577                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4578                         indir[i] =  vport->rss_indirection_tbl[i];
4579
4580         return 0;
4581 }
4582
4583 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4584                          const  u8 *key, const  u8 hfunc)
4585 {
4586         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4587         struct hclge_vport *vport = hclge_get_vport(handle);
4588         struct hclge_dev *hdev = vport->back;
4589         u8 hash_algo;
4590         int ret, i;
4591
4592         /* Set the RSS Hash Key if specified by the user */
4593         if (key) {
4594                 switch (hfunc) {
4595                 case ETH_RSS_HASH_TOP:
4596                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4597                         break;
4598                 case ETH_RSS_HASH_XOR:
4599                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4600                         break;
4601                 case ETH_RSS_HASH_NO_CHANGE:
4602                         hash_algo = vport->rss_algo;
4603                         break;
4604                 default:
4605                         return -EINVAL;
4606                 }
4607
4608                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4609                 if (ret)
4610                         return ret;
4611
4612                 /* Update the shadow RSS key with the key specified by the user */
4613                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4614                 vport->rss_algo = hash_algo;
4615         }
4616
4617         /* Update the shadow RSS table with user specified qids */
4618         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4619                 vport->rss_indirection_tbl[i] = indir[i];
4620
4621         /* Update the hardware */
4622         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4623 }
4624
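/* Convert the ethtool RXH_* flags of an rxnfc request into the driver's
 * per-flow-type tuple bits. SCTP flows additionally enable hashing on
 * the verification tag.
 */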
4625 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4626 {
4627         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4628
4629         if (nfc->data & RXH_L4_B_2_3)
4630                 hash_sets |= HCLGE_D_PORT_BIT;
4631         else
4632                 hash_sets &= ~HCLGE_D_PORT_BIT;
4633
4634         if (nfc->data & RXH_IP_SRC)
4635                 hash_sets |= HCLGE_S_IP_BIT;
4636         else
4637                 hash_sets &= ~HCLGE_S_IP_BIT;
4638
4639         if (nfc->data & RXH_IP_DST)
4640                 hash_sets |= HCLGE_D_IP_BIT;
4641         else
4642                 hash_sets &= ~HCLGE_D_IP_BIT;
4643
4644         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4645                 hash_sets |= HCLGE_V_TAG_BIT;
4646
4647         return hash_sets;
4648 }
4649
4650 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4651                                     struct ethtool_rxnfc *nfc,
4652                                     struct hclge_rss_input_tuple_cmd *req)
4653 {
4654         struct hclge_dev *hdev = vport->back;
4655         u8 tuple_sets;
4656
4657         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4658         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4659         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4660         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4661         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4662         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4663         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4664         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4665
4666         tuple_sets = hclge_get_rss_hash_bits(nfc);
4667         switch (nfc->flow_type) {
4668         case TCP_V4_FLOW:
4669                 req->ipv4_tcp_en = tuple_sets;
4670                 break;
4671         case TCP_V6_FLOW:
4672                 req->ipv6_tcp_en = tuple_sets;
4673                 break;
4674         case UDP_V4_FLOW:
4675                 req->ipv4_udp_en = tuple_sets;
4676                 break;
4677         case UDP_V6_FLOW:
4678                 req->ipv6_udp_en = tuple_sets;
4679                 break;
4680         case SCTP_V4_FLOW:
4681                 req->ipv4_sctp_en = tuple_sets;
4682                 break;
4683         case SCTP_V6_FLOW:
4684                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4685                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4686                         return -EINVAL;
4687
4688                 req->ipv6_sctp_en = tuple_sets;
4689                 break;
4690         case IPV4_FLOW:
4691                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4692                 break;
4693         case IPV6_FLOW:
4694                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4695                 break;
4696         default:
4697                 return -EINVAL;
4698         }
4699
4700         return 0;
4701 }
4702
4703 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4704                                struct ethtool_rxnfc *nfc)
4705 {
4706         struct hclge_vport *vport = hclge_get_vport(handle);
4707         struct hclge_dev *hdev = vport->back;
4708         struct hclge_rss_input_tuple_cmd *req;
4709         struct hclge_desc desc;
4710         int ret;
4711
4712         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4713                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4714                 return -EINVAL;
4715
4716         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4717         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4718
4719         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4720         if (ret) {
4721                 dev_err(&hdev->pdev->dev,
4722                         "failed to init rss tuple cmd, ret = %d\n", ret);
4723                 return ret;
4724         }
4725
4726         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4727         if (ret) {
4728                 dev_err(&hdev->pdev->dev,
4729                         "Set rss tuple fail, status = %d\n", ret);
4730                 return ret;
4731         }
4732
4733         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4734         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4735         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4736         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4737         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4738         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4739         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4740         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4741         hclge_get_rss_type(vport);
4742         return 0;
4743 }
4744
4745 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4746                                      u8 *tuple_sets)
4747 {
4748         switch (flow_type) {
4749         case TCP_V4_FLOW:
4750                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4751                 break;
4752         case UDP_V4_FLOW:
4753                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4754                 break;
4755         case TCP_V6_FLOW:
4756                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4757                 break;
4758         case UDP_V6_FLOW:
4759                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4760                 break;
4761         case SCTP_V4_FLOW:
4762                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4763                 break;
4764         case SCTP_V6_FLOW:
4765                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4766                 break;
4767         case IPV4_FLOW:
4768         case IPV6_FLOW:
4769                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4770                 break;
4771         default:
4772                 return -EINVAL;
4773         }
4774
4775         return 0;
4776 }
4777
4778 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4779 {
4780         u64 tuple_data = 0;
4781
4782         if (tuple_sets & HCLGE_D_PORT_BIT)
4783                 tuple_data |= RXH_L4_B_2_3;
4784         if (tuple_sets & HCLGE_S_PORT_BIT)
4785                 tuple_data |= RXH_L4_B_0_1;
4786         if (tuple_sets & HCLGE_D_IP_BIT)
4787                 tuple_data |= RXH_IP_DST;
4788         if (tuple_sets & HCLGE_S_IP_BIT)
4789                 tuple_data |= RXH_IP_SRC;
4790
4791         return tuple_data;
4792 }
4793
4794 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4795                                struct ethtool_rxnfc *nfc)
4796 {
4797         struct hclge_vport *vport = hclge_get_vport(handle);
4798         u8 tuple_sets;
4799         int ret;
4800
4801         nfc->data = 0;
4802
4803         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4804         if (ret || !tuple_sets)
4805                 return ret;
4806
4807         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4808
4809         return 0;
4810 }
4811
4812 static int hclge_get_tc_size(struct hnae3_handle *handle)
4813 {
4814         struct hclge_vport *vport = hclge_get_vport(handle);
4815         struct hclge_dev *hdev = vport->back;
4816
4817         return hdev->pf_rss_size_max;
4818 }
4819
4820 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4821 {
4822         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4823         struct hclge_vport *vport = hdev->vport;
4824         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4825         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4826         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4827         struct hnae3_tc_info *tc_info;
4828         u16 roundup_size;
4829         u16 rss_size;
4830         int i;
4831
4832         tc_info = &vport->nic.kinfo.tc_info;
4833         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4834                 rss_size = tc_info->tqp_count[i];
4835                 tc_valid[i] = 0;
4836
4837                 if (!(hdev->hw_tc_map & BIT(i)))
4838                         continue;
4839
4840                 /* The tc_size set to hardware is the log2 of the roundup power
4841                  * of two of rss_size; the actual queue size is limited by the
4842                  * indirection table.
4843                  */
4844                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4845                     rss_size == 0) {
4846                         dev_err(&hdev->pdev->dev,
4847                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4848                                 rss_size);
4849                         return -EINVAL;
4850                 }
4851
4852                 roundup_size = roundup_pow_of_two(rss_size);
4853                 roundup_size = ilog2(roundup_size);
4854
4855                 tc_valid[i] = 1;
4856                 tc_size[i] = roundup_size;
4857                 tc_offset[i] = tc_info->tqp_offset[i];
4858         }
4859
4860         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4861 }
4862
4863 int hclge_rss_init_hw(struct hclge_dev *hdev)
4864 {
4865         struct hclge_vport *vport = hdev->vport;
4866         u16 *rss_indir = vport[0].rss_indirection_tbl;
4867         u8 *key = vport[0].rss_hash_key;
4868         u8 hfunc = vport[0].rss_algo;
4869         int ret;
4870
4871         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4872         if (ret)
4873                 return ret;
4874
4875         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4876         if (ret)
4877                 return ret;
4878
4879         ret = hclge_set_rss_input_tuple(hdev);
4880         if (ret)
4881                 return ret;
4882
4883         return hclge_init_rss_tc_mode(hdev);
4884 }
4885
4886 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4887 {
4888         struct hclge_vport *vport = hdev->vport;
4889         int i, j;
4890
4891         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4892                 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4893                         vport[j].rss_indirection_tbl[i] =
4894                                 i % vport[j].alloc_rss_size;
4895         }
4896 }
4897
4898 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4899 {
4900         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4901         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4902         struct hclge_vport *vport = hdev->vport;
4903
4904         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4905                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4906
4907         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4908                 u16 *rss_ind_tbl;
4909
4910                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4911                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4912                 vport[i].rss_tuple_sets.ipv4_udp_en =
4913                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4914                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4915                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4916                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4917                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4918                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4919                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4920                 vport[i].rss_tuple_sets.ipv6_udp_en =
4921                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4922                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4923                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4924                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4925                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4926                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4927                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4928
4929                 vport[i].rss_algo = rss_algo;
4930
4931                 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4932                                            sizeof(*rss_ind_tbl), GFP_KERNEL);
4933                 if (!rss_ind_tbl)
4934                         return -ENOMEM;
4935
4936                 vport[i].rss_indirection_tbl = rss_ind_tbl;
4937                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4938                        HCLGE_RSS_KEY_SIZE);
4939         }
4940
4941         hclge_rss_indir_init_cfg(hdev);
4942
4943         return 0;
4944 }
4945
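/* Map (en = true) or unmap (en = false) a chain of rings to the given
 * vector id. Ring entries are packed into the command descriptor up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD at a time; a new command is sent whenever
 * the descriptor fills up and once more for any remainder.
 */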
4946 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4947                                 int vector_id, bool en,
4948                                 struct hnae3_ring_chain_node *ring_chain)
4949 {
4950         struct hclge_dev *hdev = vport->back;
4951         struct hnae3_ring_chain_node *node;
4952         struct hclge_desc desc;
4953         struct hclge_ctrl_vector_chain_cmd *req =
4954                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4955         enum hclge_cmd_status status;
4956         enum hclge_opcode_type op;
4957         u16 tqp_type_and_id;
4958         int i;
4959
4960         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4961         hclge_cmd_setup_basic_desc(&desc, op, false);
4962         req->int_vector_id_l = hnae3_get_field(vector_id,
4963                                                HCLGE_VECTOR_ID_L_M,
4964                                                HCLGE_VECTOR_ID_L_S);
4965         req->int_vector_id_h = hnae3_get_field(vector_id,
4966                                                HCLGE_VECTOR_ID_H_M,
4967                                                HCLGE_VECTOR_ID_H_S);
4968
4969         i = 0;
4970         for (node = ring_chain; node; node = node->next) {
4971                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4972                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4973                                 HCLGE_INT_TYPE_S,
4974                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4975                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4976                                 HCLGE_TQP_ID_S, node->tqp_index);
4977                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4978                                 HCLGE_INT_GL_IDX_S,
4979                                 hnae3_get_field(node->int_gl_idx,
4980                                                 HNAE3_RING_GL_IDX_M,
4981                                                 HNAE3_RING_GL_IDX_S));
4982                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4983                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4984                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4985                         req->vfid = vport->vport_id;
4986
4987                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4988                         if (status) {
4989                                 dev_err(&hdev->pdev->dev,
4990                                         "Map TQP fail, status is %d.\n",
4991                                         status);
4992                                 return -EIO;
4993                         }
4994                         i = 0;
4995
4996                         hclge_cmd_setup_basic_desc(&desc,
4997                                                    op,
4998                                                    false);
4999                         req->int_vector_id_l =
5000                                 hnae3_get_field(vector_id,
5001                                                 HCLGE_VECTOR_ID_L_M,
5002                                                 HCLGE_VECTOR_ID_L_S);
5003                         req->int_vector_id_h =
5004                                 hnae3_get_field(vector_id,
5005                                                 HCLGE_VECTOR_ID_H_M,
5006                                                 HCLGE_VECTOR_ID_H_S);
5007                 }
5008         }
5009
5010         if (i > 0) {
5011                 req->int_cause_num = i;
5012                 req->vfid = vport->vport_id;
5013                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5014                 if (status) {
5015                         dev_err(&hdev->pdev->dev,
5016                                 "Map TQP fail, status is %d.\n", status);
5017                         return -EIO;
5018                 }
5019         }
5020
5021         return 0;
5022 }
5023
5024 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5025                                     struct hnae3_ring_chain_node *ring_chain)
5026 {
5027         struct hclge_vport *vport = hclge_get_vport(handle);
5028         struct hclge_dev *hdev = vport->back;
5029         int vector_id;
5030
5031         vector_id = hclge_get_vector_index(hdev, vector);
5032         if (vector_id < 0) {
5033                 dev_err(&hdev->pdev->dev,
5034                         "failed to get vector index. vector=%d\n", vector);
5035                 return vector_id;
5036         }
5037
5038         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5039 }
5040
5041 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5042                                        struct hnae3_ring_chain_node *ring_chain)
5043 {
5044         struct hclge_vport *vport = hclge_get_vport(handle);
5045         struct hclge_dev *hdev = vport->back;
5046         int vector_id, ret;
5047
5048         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5049                 return 0;
5050
5051         vector_id = hclge_get_vector_index(hdev, vector);
5052         if (vector_id < 0) {
5053                 dev_err(&handle->pdev->dev,
5054                         "Get vector index fail. ret =%d\n", vector_id);
5055                 return vector_id;
5056         }
5057
5058         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5059         if (ret)
5060                 dev_err(&handle->pdev->dev,
5061                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5062                         vector_id, ret);
5063
5064         return ret;
5065 }
5066
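/* Configure the promiscuous mode of a function. Both the extended promisc
 * field and the legacy field (for DEVICE_VERSION_V1/2 compatibility) are
 * filled in. When the limit-promisc private flag is set, unicast promisc
 * is only applied on the rx direction.
 */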
5067 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5068                                       bool en_uc, bool en_mc, bool en_bc)
5069 {
5070         struct hclge_vport *vport = &hdev->vport[vf_id];
5071         struct hnae3_handle *handle = &vport->nic;
5072         struct hclge_promisc_cfg_cmd *req;
5073         struct hclge_desc desc;
5074         bool uc_tx_en = en_uc;
5075         u8 promisc_cfg = 0;
5076         int ret;
5077
5078         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5079
5080         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5081         req->vf_id = vf_id;
5082
5083         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5084                 uc_tx_en = false;
5085
5086         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5087         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5088         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5089         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5090         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5091         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5092         req->extend_promisc = promisc_cfg;
5093
5094         /* to be compatible with DEVICE_VERSION_V1/2 */
5095         promisc_cfg = 0;
5096         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5097         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5098         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5099         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5100         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5101         req->promisc = promisc_cfg;
5102
5103         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5104         if (ret)
5105                 dev_err(&hdev->pdev->dev,
5106                         "failed to set vport %u promisc mode, ret = %d.\n",
5107                         vf_id, ret);
5108
5109         return ret;
5110 }
5111
5112 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5113                                  bool en_mc_pmc, bool en_bc_pmc)
5114 {
5115         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5116                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5117 }
5118
5119 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5120                                   bool en_mc_pmc)
5121 {
5122         struct hclge_vport *vport = hclge_get_vport(handle);
5123         struct hclge_dev *hdev = vport->back;
5124         bool en_bc_pmc = true;
5125
5126         /* For devices whose version is below V2, if broadcast promisc is
5127          * enabled, the vlan filter is always bypassed. So broadcast promisc
5128          * should be disabled until the user enables promisc mode.
5129          */
5130         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5131                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5132
5133         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5134                                             en_bc_pmc);
5135 }
5136
5137 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5138 {
5139         struct hclge_vport *vport = hclge_get_vport(handle);
5140         struct hclge_dev *hdev = vport->back;
5141
5142         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5143 }
5144
5145 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5146 {
5147         struct hclge_get_fd_mode_cmd *req;
5148         struct hclge_desc desc;
5149         int ret;
5150
5151         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5152
5153         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5154
5155         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5156         if (ret) {
5157                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5158                 return ret;
5159         }
5160
5161         *fd_mode = req->mode;
5162
5163         return ret;
5164 }
5165
5166 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5167                                    u32 *stage1_entry_num,
5168                                    u32 *stage2_entry_num,
5169                                    u16 *stage1_counter_num,
5170                                    u16 *stage2_counter_num)
5171 {
5172         struct hclge_get_fd_allocation_cmd *req;
5173         struct hclge_desc desc;
5174         int ret;
5175
5176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5177
5178         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5179
5180         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5181         if (ret) {
5182                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5183                         ret);
5184                 return ret;
5185         }
5186
5187         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5188         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5189         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5190         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5191
5192         return ret;
5193 }
5194
5195 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5196                                    enum HCLGE_FD_STAGE stage_num)
5197 {
5198         struct hclge_set_fd_key_config_cmd *req;
5199         struct hclge_fd_key_cfg *stage;
5200         struct hclge_desc desc;
5201         int ret;
5202
5203         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5204
5205         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5206         stage = &hdev->fd_cfg.key_cfg[stage_num];
5207         req->stage = stage_num;
5208         req->key_select = stage->key_sel;
5209         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5210         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5211         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5212         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5213         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5214         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5215
5216         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5217         if (ret)
5218                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5219
5220         return ret;
5221 }
5222
5223 static int hclge_init_fd_config(struct hclge_dev *hdev)
5224 {
5225 #define LOW_2_WORDS             0x03
5226         struct hclge_fd_key_cfg *key_cfg;
5227         int ret;
5228
5229         if (!hnae3_dev_fd_supported(hdev))
5230                 return 0;
5231
5232         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5233         if (ret)
5234                 return ret;
5235
5236         switch (hdev->fd_cfg.fd_mode) {
5237         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5238                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5239                 break;
5240         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5241                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5242                 break;
5243         default:
5244                 dev_err(&hdev->pdev->dev,
5245                         "Unsupported flow director mode %u\n",
5246                         hdev->fd_cfg.fd_mode);
5247                 return -EOPNOTSUPP;
5248         }
5249
5250         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5251         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5252         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5253         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5254         key_cfg->outer_sipv6_word_en = 0;
5255         key_cfg->outer_dipv6_word_en = 0;
5256
5257         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5258                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5259                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5260                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5261
5262         /* With the max 400bit key, MAC address tuples are also supported */
5263         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5264                 key_cfg->tuple_active |=
5265                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5266
5267         /* roce_type is used to filter roce frames
5268          * dst_vport is used to specify the rule
5269          */
5270         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5271
5272         ret = hclge_get_fd_allocation(hdev,
5273                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5274                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5275                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5276                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5277         if (ret)
5278                 return ret;
5279
5280         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5281 }
5282
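/* Write one flow director TCAM entry. Three chained descriptors are used:
 * the first carries the stage, x/y select, entry index and valid bit, and
 * the key bytes are split across the tcam_data areas of all three
 * descriptors.
 */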
5283 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5284                                 int loc, u8 *key, bool is_add)
5285 {
5286         struct hclge_fd_tcam_config_1_cmd *req1;
5287         struct hclge_fd_tcam_config_2_cmd *req2;
5288         struct hclge_fd_tcam_config_3_cmd *req3;
5289         struct hclge_desc desc[3];
5290         int ret;
5291
5292         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5293         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5294         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5295         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5296         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5297
5298         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5299         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5300         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5301
5302         req1->stage = stage;
5303         req1->xy_sel = sel_x ? 1 : 0;
5304         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5305         req1->index = cpu_to_le32(loc);
5306         req1->entry_vld = sel_x ? is_add : 0;
5307
5308         if (key) {
5309                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5310                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5311                        sizeof(req2->tcam_data));
5312                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5313                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5314         }
5315
5316         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5317         if (ret)
5318                 dev_err(&hdev->pdev->dev,
5319                         "config tcam key fail, ret=%d\n",
5320                         ret);
5321
5322         return ret;
5323 }
5324
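/* Build the 64-bit action data for a flow director rule (rule id
 * write-back, optional TC override, drop or direct-queue forwarding,
 * counter and next-stage fields) and write it at the given rule location.
 */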
5325 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5326                               struct hclge_fd_ad_data *action)
5327 {
5328         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5329         struct hclge_fd_ad_config_cmd *req;
5330         struct hclge_desc desc;
5331         u64 ad_data = 0;
5332         int ret;
5333
5334         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5335
5336         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5337         req->index = cpu_to_le32(loc);
5338         req->stage = stage;
5339
5340         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5341                       action->write_rule_id_to_bd);
5342         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5343                         action->rule_id);
5344         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5345                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5346                               action->override_tc);
5347                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5348                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5349         }
5350         ad_data <<= 32;
5351         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5352         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5353                       action->forward_to_direct_queue);
5354         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5355                         action->queue_id);
5356         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5357         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5358                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5359         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5360         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5361                         action->counter_id);
5362
5363         req->ad_data = cpu_to_le64(ad_data);
5364         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5365         if (ret)
5366                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5367
5368         return ret;
5369 }
5370
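/* Convert one tuple of the rule into the TCAM x/y key format using
 * calc_x()/calc_y() on the tuple value and mask. Returns true if the
 * tuple occupies space in the key (unused tuples are simply left as
 * zero), and false if the tuple type is not recognized.
 */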
5371 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5372                                    struct hclge_fd_rule *rule)
5373 {
5374         u16 tmp_x_s, tmp_y_s;
5375         u32 tmp_x_l, tmp_y_l;
5376         int i;
5377
5378         if (rule->unused_tuple & tuple_bit)
5379                 return true;
5380
5381         switch (tuple_bit) {
5382         case BIT(INNER_DST_MAC):
5383                 for (i = 0; i < ETH_ALEN; i++) {
5384                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5385                                rule->tuples_mask.dst_mac[i]);
5386                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5387                                rule->tuples_mask.dst_mac[i]);
5388                 }
5389
5390                 return true;
5391         case BIT(INNER_SRC_MAC):
5392                 for (i = 0; i < ETH_ALEN; i++) {
5393                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5394                                rule->tuples_mask.src_mac[i]);
5395                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5396                                rule->tuples_mask.src_mac[i]);
5397                 }
5398
5399                 return true;
5400         case BIT(INNER_VLAN_TAG_FST):
5401                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5402                        rule->tuples_mask.vlan_tag1);
5403                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5404                        rule->tuples_mask.vlan_tag1);
5405                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5406                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5407
5408                 return true;
5409         case BIT(INNER_ETH_TYPE):
5410                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5411                        rule->tuples_mask.ether_proto);
5412                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5413                        rule->tuples_mask.ether_proto);
5414                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5415                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5416
5417                 return true;
5418         case BIT(INNER_IP_TOS):
5419                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5420                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5421
5422                 return true;
5423         case BIT(INNER_IP_PROTO):
5424                 calc_x(*key_x, rule->tuples.ip_proto,
5425                        rule->tuples_mask.ip_proto);
5426                 calc_y(*key_y, rule->tuples.ip_proto,
5427                        rule->tuples_mask.ip_proto);
5428
5429                 return true;
5430         case BIT(INNER_SRC_IP):
5431                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5432                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5433                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5434                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5435                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5436                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5437
5438                 return true;
5439         case BIT(INNER_DST_IP):
5440                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5441                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5442                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5443                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5444                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5445                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5446
5447                 return true;
5448         case BIT(INNER_SRC_PORT):
5449                 calc_x(tmp_x_s, rule->tuples.src_port,
5450                        rule->tuples_mask.src_port);
5451                 calc_y(tmp_y_s, rule->tuples.src_port,
5452                        rule->tuples_mask.src_port);
5453                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5454                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5455
5456                 return true;
5457         case BIT(INNER_DST_PORT):
5458                 calc_x(tmp_x_s, rule->tuples.dst_port,
5459                        rule->tuples_mask.dst_port);
5460                 calc_y(tmp_y_s, rule->tuples.dst_port,
5461                        rule->tuples_mask.dst_port);
5462                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5463                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5464
5465                 return true;
5466         default:
5467                 return false;
5468         }
5469 }
5470
5471 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5472                                  u8 vf_id, u8 network_port_id)
5473 {
5474         u32 port_number = 0;
5475
5476         if (port_type == HOST_PORT) {
5477                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5478                                 pf_id);
5479                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5480                                 vf_id);
5481                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5482         } else {
5483                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5484                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5485                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5486         }
5487
5488         return port_number;
5489 }
5490
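/* Pack the active meta data fields (packet type and destination vport)
 * into a 32-bit word, convert it into TCAM x/y format and left align the
 * result within the meta data region of the key.
 */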
5491 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5492                                        __le32 *key_x, __le32 *key_y,
5493                                        struct hclge_fd_rule *rule)
5494 {
5495         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5496         u8 cur_pos = 0, tuple_size, shift_bits;
5497         unsigned int i;
5498
5499         for (i = 0; i < MAX_META_DATA; i++) {
5500                 tuple_size = meta_data_key_info[i].key_length;
5501                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5502
5503                 switch (tuple_bit) {
5504                 case BIT(ROCE_TYPE):
5505                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5506                         cur_pos += tuple_size;
5507                         break;
5508                 case BIT(DST_VPORT):
5509                         port_number = hclge_get_port_number(HOST_PORT, 0,
5510                                                             rule->vf_id, 0);
5511                         hnae3_set_field(meta_data,
5512                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
5513                                         cur_pos, port_number);
5514                         cur_pos += tuple_size;
5515                         break;
5516                 default:
5517                         break;
5518                 }
5519         }
5520
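        /* meta_data now occupies the low cur_pos bits; the all-ones mask
         * below marks every meta data bit as significant, and the shift
         * moves the value to the top of the 32-bit word so that it lands
         * at the MSB end of the meta data region
         */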
5521         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5522         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5523         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5524
5525         *key_x = cpu_to_le32(tmp_x << shift_bits);
5526         *key_y = cpu_to_le32(tmp_y << shift_bits);
5527 }
5528
5529 /* A complete key consists of a meta data key and a tuple key.
5530  * The meta data key is stored at the MSB region, the tuple key is stored at
5531  * the LSB region, and any unused bits in between are filled with 0.
5532  */
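/* A rough sketch of the resulting key layout (field widths depend on the
 * configured key length and on which tuples are active):
 *
 *   MSB                                                        LSB
 *   +-----------------+------------------+------------------------+
 *   |  meta data key  |   zero padding   |       tuple key        |
 *   +-----------------+------------------+------------------------+
 */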
5533 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5534                             struct hclge_fd_rule *rule)
5535 {
5536         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5537         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5538         u8 *cur_key_x, *cur_key_y;
5539         u8 meta_data_region;
5540         u8 tuple_size;
5541         int ret;
5542         u32 i;
5543
5544         memset(key_x, 0, sizeof(key_x));
5545         memset(key_y, 0, sizeof(key_y));
5546         cur_key_x = key_x;
5547         cur_key_y = key_y;
5548
5549         for (i = 0; i < MAX_TUPLE; i++) {
5550                 bool tuple_valid;
5551                 u32 check_tuple;
5552
5553                 tuple_size = tuple_key_info[i].key_length / 8;
5554                 check_tuple = key_cfg->tuple_active & BIT(i);
5555
5556                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5557                                                      cur_key_y, rule);
5558                 if (tuple_valid) {
5559                         cur_key_x += tuple_size;
5560                         cur_key_y += tuple_size;
5561                 }
5562         }
5563
5564         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5565                         MAX_META_DATA_LENGTH / 8;
5566
5567         hclge_fd_convert_meta_data(key_cfg,
5568                                    (__le32 *)(key_x + meta_data_region),
5569                                    (__le32 *)(key_y + meta_data_region),
5570                                    rule);
5571
5572         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5573                                    true);
5574         if (ret) {
5575                 dev_err(&hdev->pdev->dev,
5576                         "fd key_y config fail, loc=%u, ret=%d\n",
5577                         rule->location, ret);
5578                 return ret;
5579         }
5580
5581         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5582                                    true);
5583         if (ret)
5584                 dev_err(&hdev->pdev->dev,
5585                         "fd key_x config fail, loc=%u, ret=%d\n",
5586                         rule->location, ret);
5587         return ret;
5588 }
5589
5590 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5591                                struct hclge_fd_rule *rule)
5592 {
5593         struct hclge_vport *vport = hdev->vport;
5594         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5595         struct hclge_fd_ad_data ad_data;
5596
5597         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5598         ad_data.ad_id = rule->location;
5599
5600         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5601                 ad_data.drop_packet = true;
5602         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5603                 ad_data.override_tc = true;
5604                 ad_data.queue_id =
5605                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5606                 ad_data.tc_size =
5607                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5608         } else {
5609                 ad_data.forward_to_direct_queue = true;
5610                 ad_data.queue_id = rule->queue_id;
5611         }
5612
5613         ad_data.use_counter = false;
5614         ad_data.counter_id = 0;
5615
5616         ad_data.use_next_stage = false;
5617         ad_data.next_input_key = 0;
5618
5619         ad_data.write_rule_id_to_bd = true;
5620         ad_data.rule_id = rule->location;
5621
5622         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5623 }
5624
5625 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5626                                        u32 *unused_tuple)
5627 {
5628         if (!spec || !unused_tuple)
5629                 return -EINVAL;
5630
5631         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5632
5633         if (!spec->ip4src)
5634                 *unused_tuple |= BIT(INNER_SRC_IP);
5635
5636         if (!spec->ip4dst)
5637                 *unused_tuple |= BIT(INNER_DST_IP);
5638
5639         if (!spec->psrc)
5640                 *unused_tuple |= BIT(INNER_SRC_PORT);
5641
5642         if (!spec->pdst)
5643                 *unused_tuple |= BIT(INNER_DST_PORT);
5644
5645         if (!spec->tos)
5646                 *unused_tuple |= BIT(INNER_IP_TOS);
5647
5648         return 0;
5649 }
5650
5651 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5652                                     u32 *unused_tuple)
5653 {
5654         if (!spec || !unused_tuple)
5655                 return -EINVAL;
5656
5657         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5658                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5659
5660         if (!spec->ip4src)
5661                 *unused_tuple |= BIT(INNER_SRC_IP);
5662
5663         if (!spec->ip4dst)
5664                 *unused_tuple |= BIT(INNER_DST_IP);
5665
5666         if (!spec->tos)
5667                 *unused_tuple |= BIT(INNER_IP_TOS);
5668
5669         if (!spec->proto)
5670                 *unused_tuple |= BIT(INNER_IP_PROTO);
5671
5672         if (spec->l4_4_bytes)
5673                 return -EOPNOTSUPP;
5674
5675         if (spec->ip_ver != ETH_RX_NFC_IP4)
5676                 return -EOPNOTSUPP;
5677
5678         return 0;
5679 }
5680
5681 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5682                                        u32 *unused_tuple)
5683 {
5684         if (!spec || !unused_tuple)
5685                 return -EINVAL;
5686
5687         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5688                 BIT(INNER_IP_TOS);
5689
5690         /* check whether the src/dst ip addresses are used */
5691         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5692                 *unused_tuple |= BIT(INNER_SRC_IP);
5693
5694         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5695                 *unused_tuple |= BIT(INNER_DST_IP);
5696
5697         if (!spec->psrc)
5698                 *unused_tuple |= BIT(INNER_SRC_PORT);
5699
5700         if (!spec->pdst)
5701                 *unused_tuple |= BIT(INNER_DST_PORT);
5702
5703         if (spec->tclass)
5704                 return -EOPNOTSUPP;
5705
5706         return 0;
5707 }
5708
5709 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5710                                     u32 *unused_tuple)
5711 {
5712         if (!spec || !unused_tuple)
5713                 return -EINVAL;
5714
5715         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5716                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5717
5718         /* check whether the src/dst ip addresses are used */
5719         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5720                 *unused_tuple |= BIT(INNER_SRC_IP);
5721
5722         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5723                 *unused_tuple |= BIT(INNER_DST_IP);
5724
5725         if (!spec->l4_proto)
5726                 *unused_tuple |= BIT(INNER_IP_PROTO);
5727
5728         if (spec->tclass)
5729                 return -EOPNOTSUPP;
5730
5731         if (spec->l4_4_bytes)
5732                 return -EOPNOTSUPP;
5733
5734         return 0;
5735 }
5736
5737 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5738 {
5739         if (!spec || !unused_tuple)
5740                 return -EINVAL;
5741
5742         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5743                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5744                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5745
5746         if (is_zero_ether_addr(spec->h_source))
5747                 *unused_tuple |= BIT(INNER_SRC_MAC);
5748
5749         if (is_zero_ether_addr(spec->h_dest))
5750                 *unused_tuple |= BIT(INNER_DST_MAC);
5751
5752         if (!spec->h_proto)
5753                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5754
5755         return 0;
5756 }
5757
5758 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5759                                     struct ethtool_rx_flow_spec *fs,
5760                                     u32 *unused_tuple)
5761 {
5762         if (fs->flow_type & FLOW_EXT) {
5763                 if (fs->h_ext.vlan_etype) {
5764                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5765                         return -EOPNOTSUPP;
5766                 }
5767
5768                 if (!fs->h_ext.vlan_tci)
5769                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5770
5771                 if (fs->m_ext.vlan_tci &&
5772                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5773                         dev_err(&hdev->pdev->dev,
5774                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5775                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5776                         return -EINVAL;
5777                 }
5778         } else {
5779                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5780         }
5781
5782         if (fs->flow_type & FLOW_MAC_EXT) {
5783                 if (hdev->fd_cfg.fd_mode !=
5784                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5785                         dev_err(&hdev->pdev->dev,
5786                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5787                         return -EOPNOTSUPP;
5788                 }
5789
5790                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5791                         *unused_tuple |= BIT(INNER_DST_MAC);
5792                 else
5793                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5794         }
5795
5796         return 0;
5797 }
5798
5799 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5800                                struct ethtool_rx_flow_spec *fs,
5801                                u32 *unused_tuple)
5802 {
5803         u32 flow_type;
5804         int ret;
5805
5806         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5807                 dev_err(&hdev->pdev->dev,
5808                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5809                         fs->location,
5810                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5811                 return -EINVAL;
5812         }
5813
5814         if ((fs->flow_type & FLOW_EXT) &&
5815             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5816                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5817                 return -EOPNOTSUPP;
5818         }
5819
5820         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5821         switch (flow_type) {
5822         case SCTP_V4_FLOW:
5823         case TCP_V4_FLOW:
5824         case UDP_V4_FLOW:
5825                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5826                                                   unused_tuple);
5827                 break;
5828         case IP_USER_FLOW:
5829                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5830                                                unused_tuple);
5831                 break;
5832         case SCTP_V6_FLOW:
5833         case TCP_V6_FLOW:
5834         case UDP_V6_FLOW:
5835                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5836                                                   unused_tuple);
5837                 break;
5838         case IPV6_USER_FLOW:
5839                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5840                                                unused_tuple);
5841                 break;
5842         case ETHER_FLOW:
5843                 if (hdev->fd_cfg.fd_mode !=
5844                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5845                         dev_err(&hdev->pdev->dev,
5846                                 "ETHER_FLOW is not supported in current fd mode!\n");
5847                         return -EOPNOTSUPP;
5848                 }
5849
5850                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5851                                                  unused_tuple);
5852                 break;
5853         default:
5854                 dev_err(&hdev->pdev->dev,
5855                         "unsupported protocol type, protocol type = %#x\n",
5856                         flow_type);
5857                 return -EOPNOTSUPP;
5858         }
5859
5860         if (ret) {
5861                 dev_err(&hdev->pdev->dev,
5862                         "failed to check flow union tuple, ret = %d\n",
5863                         ret);
5864                 return ret;
5865         }
5866
5867         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5868 }
5869
5870 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5871 {
5872         struct hclge_fd_rule *rule = NULL;
5873         struct hlist_node *node2;
5874
5875         spin_lock_bh(&hdev->fd_rule_lock);
5876         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5877                 if (rule->location >= location)
5878                         break;
5879         }
5880
5881         spin_unlock_bh(&hdev->fd_rule_lock);
5882
5883         return rule && rule->location == location;
5884 }
5885
5886 /* must be called with fd_rule_lock held */
5887 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5888                                      struct hclge_fd_rule *new_rule,
5889                                      u16 location,
5890                                      bool is_add)
5891 {
5892         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5893         struct hlist_node *node2;
5894
5895         if (is_add && !new_rule)
5896                 return -EINVAL;
5897
5898         hlist_for_each_entry_safe(rule, node2,
5899                                   &hdev->fd_rule_list, rule_node) {
5900                 if (rule->location >= location)
5901                         break;
5902                 parent = rule;
5903         }
5904
5905         if (rule && rule->location == location) {
5906                 hlist_del(&rule->rule_node);
5907                 kfree(rule);
5908                 hdev->hclge_fd_rule_num--;
5909
5910                 if (!is_add) {
5911                         if (!hdev->hclge_fd_rule_num)
5912                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5913                         clear_bit(location, hdev->fd_bmap);
5914
5915                         return 0;
5916                 }
5917         } else if (!is_add) {
5918                 dev_err(&hdev->pdev->dev,
5919                         "failed to delete, rule %u does not exist\n",
5920                         location);
5921                 return -EINVAL;
5922         }
5923
5924         INIT_HLIST_NODE(&new_rule->rule_node);
5925
5926         if (parent)
5927                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5928         else
5929                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5930
5931         set_bit(location, hdev->fd_bmap);
5932         hdev->hclge_fd_rule_num++;
5933         hdev->fd_active_type = new_rule->rule_type;
5934
5935         return 0;
5936 }
5937
5938 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5939                               struct ethtool_rx_flow_spec *fs,
5940                               struct hclge_fd_rule *rule)
5941 {
5942         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5943
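        /* the tuple values and masks below are stored in CPU byte order;
         * they are converted back to big endian when a rule is read out
         * again (see the hclge_fd_get_*_info() helpers further down)
         */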
5944         switch (flow_type) {
5945         case SCTP_V4_FLOW:
5946         case TCP_V4_FLOW:
5947         case UDP_V4_FLOW:
5948                 rule->tuples.src_ip[IPV4_INDEX] =
5949                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5950                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5951                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5952
5953                 rule->tuples.dst_ip[IPV4_INDEX] =
5954                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5955                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5956                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5957
5958                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5959                 rule->tuples_mask.src_port =
5960                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5961
5962                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5963                 rule->tuples_mask.dst_port =
5964                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5965
5966                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5967                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5968
5969                 rule->tuples.ether_proto = ETH_P_IP;
5970                 rule->tuples_mask.ether_proto = 0xFFFF;
5971
5972                 break;
5973         case IP_USER_FLOW:
5974                 rule->tuples.src_ip[IPV4_INDEX] =
5975                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5976                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5977                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5978
5979                 rule->tuples.dst_ip[IPV4_INDEX] =
5980                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5981                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5982                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5983
5984                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5985                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5986
5987                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5988                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5989
5990                 rule->tuples.ether_proto = ETH_P_IP;
5991                 rule->tuples_mask.ether_proto = 0xFFFF;
5992
5993                 break;
5994         case SCTP_V6_FLOW:
5995         case TCP_V6_FLOW:
5996         case UDP_V6_FLOW:
5997                 be32_to_cpu_array(rule->tuples.src_ip,
5998                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5999                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6000                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
6001
6002                 be32_to_cpu_array(rule->tuples.dst_ip,
6003                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6004                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6005                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6006
6007                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6008                 rule->tuples_mask.src_port =
6009                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6010
6011                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6012                 rule->tuples_mask.dst_port =
6013                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6014
6015                 rule->tuples.ether_proto = ETH_P_IPV6;
6016                 rule->tuples_mask.ether_proto = 0xFFFF;
6017
6018                 break;
6019         case IPV6_USER_FLOW:
6020                 be32_to_cpu_array(rule->tuples.src_ip,
6021                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6022                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6023                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6024
6025                 be32_to_cpu_array(rule->tuples.dst_ip,
6026                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6027                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6028                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6029
6030                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6031                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6032
6033                 rule->tuples.ether_proto = ETH_P_IPV6;
6034                 rule->tuples_mask.ether_proto = 0xFFFF;
6035
6036                 break;
6037         case ETHER_FLOW:
6038                 ether_addr_copy(rule->tuples.src_mac,
6039                                 fs->h_u.ether_spec.h_source);
6040                 ether_addr_copy(rule->tuples_mask.src_mac,
6041                                 fs->m_u.ether_spec.h_source);
6042
6043                 ether_addr_copy(rule->tuples.dst_mac,
6044                                 fs->h_u.ether_spec.h_dest);
6045                 ether_addr_copy(rule->tuples_mask.dst_mac,
6046                                 fs->m_u.ether_spec.h_dest);
6047
6048                 rule->tuples.ether_proto =
6049                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
6050                 rule->tuples_mask.ether_proto =
6051                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
6052
6053                 break;
6054         default:
6055                 return -EOPNOTSUPP;
6056         }
6057
6058         switch (flow_type) {
6059         case SCTP_V4_FLOW:
6060         case SCTP_V6_FLOW:
6061                 rule->tuples.ip_proto = IPPROTO_SCTP;
6062                 rule->tuples_mask.ip_proto = 0xFF;
6063                 break;
6064         case TCP_V4_FLOW:
6065         case TCP_V6_FLOW:
6066                 rule->tuples.ip_proto = IPPROTO_TCP;
6067                 rule->tuples_mask.ip_proto = 0xFF;
6068                 break;
6069         case UDP_V4_FLOW:
6070         case UDP_V6_FLOW:
6071                 rule->tuples.ip_proto = IPPROTO_UDP;
6072                 rule->tuples_mask.ip_proto = 0xFF;
6073                 break;
6074         default:
6075                 break;
6076         }
6077
6078         if (fs->flow_type & FLOW_EXT) {
6079                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6080                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6081         }
6082
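        /* for FLOW_MAC_EXT the destination mac from h_ext/m_ext overrides
         * any value already taken from the ether spec above
         */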
6083         if (fs->flow_type & FLOW_MAC_EXT) {
6084                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6085                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6086         }
6087
6088         return 0;
6089 }
6090
6091 /* must be called with fd_rule_lock held */
6092 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6093                                 struct hclge_fd_rule *rule)
6094 {
6095         int ret;
6096
6097         if (!rule) {
6098                 dev_err(&hdev->pdev->dev,
6099                         "The flow director rule is NULL\n");
6100                 return -EINVAL;
6101         }
6102
6103         /* this never fails here, so there is no need to check the return value */
6104         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6105
6106         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6107         if (ret)
6108                 goto clear_rule;
6109
6110         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6111         if (ret)
6112                 goto clear_rule;
6113
6114         return 0;
6115
6116 clear_rule:
6117         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6118         return ret;
6119 }
6120
6121 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6122 {
6123         struct hclge_vport *vport = hclge_get_vport(handle);
6124         struct hclge_dev *hdev = vport->back;
6125
6126         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6127 }
6128
6129 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6130                               struct ethtool_rxnfc *cmd)
6131 {
6132         struct hclge_vport *vport = hclge_get_vport(handle);
6133         struct hclge_dev *hdev = vport->back;
6134         u16 dst_vport_id = 0, q_index = 0;
6135         struct ethtool_rx_flow_spec *fs;
6136         struct hclge_fd_rule *rule;
6137         u32 unused = 0;
6138         u8 action;
6139         int ret;
6140
6141         if (!hnae3_dev_fd_supported(hdev)) {
6142                 dev_err(&hdev->pdev->dev,
6143                         "flow director is not supported\n");
6144                 return -EOPNOTSUPP;
6145         }
6146
6147         if (!hdev->fd_en) {
6148                 dev_err(&hdev->pdev->dev,
6149                         "please enable flow director first\n");
6150                 return -EOPNOTSUPP;
6151         }
6152
6153         if (hclge_is_cls_flower_active(handle)) {
6154                 dev_err(&hdev->pdev->dev,
6155                         "please delete all existing cls flower rules first\n");
6156                 return -EINVAL;
6157         }
6158
6159         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6160
6161         ret = hclge_fd_check_spec(hdev, fs, &unused);
6162         if (ret)
6163                 return ret;
6164
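        /* ring_cookie either requests a drop (RX_CLS_FLOW_DISC) or encodes
         * the destination: the low bits hold the queue index and the bits
         * above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF hold the VF id, which is
         * what the two ethtool_get_flow_spec_ring*() helpers extract below.
         */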
6165         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6166                 action = HCLGE_FD_ACTION_DROP_PACKET;
6167         } else {
6168                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6169                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6170                 u16 tqps;
6171
6172                 if (vf > hdev->num_req_vfs) {
6173                         dev_err(&hdev->pdev->dev,
6174                                 "Error: vf id (%u) > max vf num (%u)\n",
6175                                 vf, hdev->num_req_vfs);
6176                         return -EINVAL;
6177                 }
6178
6179                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6180                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6181
6182                 if (ring >= tqps) {
6183                         dev_err(&hdev->pdev->dev,
6184                                 "Error: queue id (%u) > max queue id (%u)\n",
6185                                 ring, tqps - 1);
6186                         return -EINVAL;
6187                 }
6188
6189                 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6190                 q_index = ring;
6191         }
6192
6193         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6194         if (!rule)
6195                 return -ENOMEM;
6196
6197         ret = hclge_fd_get_tuple(hdev, fs, rule);
6198         if (ret) {
6199                 kfree(rule);
6200                 return ret;
6201         }
6202
6203         rule->flow_type = fs->flow_type;
6204         rule->location = fs->location;
6205         rule->unused_tuple = unused;
6206         rule->vf_id = dst_vport_id;
6207         rule->queue_id = q_index;
6208         rule->action = action;
6209         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6210
6211         /* to avoid rule conflicts, clear all arfs rules when the user
6212          * configures a rule via ethtool
6213          */
6214         spin_lock_bh(&hdev->fd_rule_lock);
6215         hclge_clear_arfs_rules(handle);
6216
6217         ret = hclge_fd_config_rule(hdev, rule);
6218
6219         spin_unlock_bh(&hdev->fd_rule_lock);
6220
6221         return ret;
6222 }
6223
6224 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6225                               struct ethtool_rxnfc *cmd)
6226 {
6227         struct hclge_vport *vport = hclge_get_vport(handle);
6228         struct hclge_dev *hdev = vport->back;
6229         struct ethtool_rx_flow_spec *fs;
6230         int ret;
6231
6232         if (!hnae3_dev_fd_supported(hdev))
6233                 return -EOPNOTSUPP;
6234
6235         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6236
6237         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6238                 return -EINVAL;
6239
6240         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6241             !hclge_fd_rule_exist(hdev, fs->location)) {
6242                 dev_err(&hdev->pdev->dev,
6243                         "failed to delete, rule %u does not exist\n", fs->location);
6244                 return -ENOENT;
6245         }
6246
6247         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6248                                    NULL, false);
6249         if (ret)
6250                 return ret;
6251
6252         spin_lock_bh(&hdev->fd_rule_lock);
6253         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6254
6255         spin_unlock_bh(&hdev->fd_rule_lock);
6256
6257         return ret;
6258 }
6259
6260 /* must be called with fd_rule_lock held */
6261 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6262                                      bool clear_list)
6263 {
6264         struct hclge_vport *vport = hclge_get_vport(handle);
6265         struct hclge_dev *hdev = vport->back;
6266         struct hclge_fd_rule *rule;
6267         struct hlist_node *node;
6268         u16 location;
6269
6270         if (!hnae3_dev_fd_supported(hdev))
6271                 return;
6272
6273         for_each_set_bit(location, hdev->fd_bmap,
6274                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6275                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6276                                      NULL, false);
6277
6278         if (clear_list) {
6279                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6280                                           rule_node) {
6281                         hlist_del(&rule->rule_node);
6282                         kfree(rule);
6283                 }
6284                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6285                 hdev->hclge_fd_rule_num = 0;
6286                 bitmap_zero(hdev->fd_bmap,
6287                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6288         }
6289 }
6290
6291 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6292 {
6293         struct hclge_vport *vport = hclge_get_vport(handle);
6294         struct hclge_dev *hdev = vport->back;
6295         struct hclge_fd_rule *rule;
6296         struct hlist_node *node;
6297         int ret;
6298
6299         /* Return ok here, because reset error handling will check this
6300          * return value. If error is returned here, the reset process will
6301          * fail.
6302          */
6303         if (!hnae3_dev_fd_supported(hdev))
6304                 return 0;
6305
6306         /* if fd is disabled, the rules should not be restored during reset */
6307         if (!hdev->fd_en)
6308                 return 0;
6309
6310         spin_lock_bh(&hdev->fd_rule_lock);
6311         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6312                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6313                 if (!ret)
6314                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6315
6316                 if (ret) {
6317                         dev_warn(&hdev->pdev->dev,
6318                                  "Restore rule %u failed, remove it\n",
6319                                  rule->location);
6320                         clear_bit(rule->location, hdev->fd_bmap);
6321                         hlist_del(&rule->rule_node);
6322                         kfree(rule);
6323                         hdev->hclge_fd_rule_num--;
6324                 }
6325         }
6326
6327         if (hdev->hclge_fd_rule_num)
6328                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6329
6330         spin_unlock_bh(&hdev->fd_rule_lock);
6331
6332         return 0;
6333 }
6334
6335 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6336                                  struct ethtool_rxnfc *cmd)
6337 {
6338         struct hclge_vport *vport = hclge_get_vport(handle);
6339         struct hclge_dev *hdev = vport->back;
6340
6341         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6342                 return -EOPNOTSUPP;
6343
6344         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6345         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6346
6347         return 0;
6348 }
6349
6350 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6351                                      struct ethtool_tcpip4_spec *spec,
6352                                      struct ethtool_tcpip4_spec *spec_mask)
6353 {
6354         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6355         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6356                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6357
6358         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6359         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6360                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6361
6362         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6363         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6364                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6365
6366         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6367         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6368                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6369
6370         spec->tos = rule->tuples.ip_tos;
6371         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6372                         0 : rule->tuples_mask.ip_tos;
6373 }
6374
6375 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6376                                   struct ethtool_usrip4_spec *spec,
6377                                   struct ethtool_usrip4_spec *spec_mask)
6378 {
6379         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6380         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6381                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6382
6383         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6384         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6385                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6386
6387         spec->tos = rule->tuples.ip_tos;
6388         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6389                         0 : rule->tuples_mask.ip_tos;
6390
6391         spec->proto = rule->tuples.ip_proto;
6392         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6393                         0 : rule->tuples_mask.ip_proto;
6394
6395         spec->ip_ver = ETH_RX_NFC_IP4;
6396 }
6397
6398 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6399                                      struct ethtool_tcpip6_spec *spec,
6400                                      struct ethtool_tcpip6_spec *spec_mask)
6401 {
6402         cpu_to_be32_array(spec->ip6src,
6403                           rule->tuples.src_ip, IPV6_SIZE);
6404         cpu_to_be32_array(spec->ip6dst,
6405                           rule->tuples.dst_ip, IPV6_SIZE);
6406         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6407                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6408         else
6409                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6410                                   IPV6_SIZE);
6411
6412         if (rule->unused_tuple & BIT(INNER_DST_IP))
6413                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6414         else
6415                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6416                                   IPV6_SIZE);
6417
6418         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6419         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6420                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6421
6422         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6423         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6424                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6425 }
6426
6427 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6428                                   struct ethtool_usrip6_spec *spec,
6429                                   struct ethtool_usrip6_spec *spec_mask)
6430 {
6431         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6432         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6433         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6434                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6435         else
6436                 cpu_to_be32_array(spec_mask->ip6src,
6437                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6438
6439         if (rule->unused_tuple & BIT(INNER_DST_IP))
6440                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6441         else
6442                 cpu_to_be32_array(spec_mask->ip6dst,
6443                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6444
6445         spec->l4_proto = rule->tuples.ip_proto;
6446         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6447                         0 : rule->tuples_mask.ip_proto;
6448 }
6449
6450 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6451                                     struct ethhdr *spec,
6452                                     struct ethhdr *spec_mask)
6453 {
6454         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6455         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6456
6457         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6458                 eth_zero_addr(spec_mask->h_source);
6459         else
6460                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6461
6462         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6463                 eth_zero_addr(spec_mask->h_dest);
6464         else
6465                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6466
6467         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6468         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6469                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6470 }
6471
6472 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6473                                   struct hclge_fd_rule *rule)
6474 {
6475         if (fs->flow_type & FLOW_EXT) {
6476                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6477                 fs->m_ext.vlan_tci =
6478                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6479                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6480         }
6481
6482         if (fs->flow_type & FLOW_MAC_EXT) {
6483                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6484                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6485                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6486                 else
6487                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6488                                         rule->tuples_mask.dst_mac);
6489         }
6490 }
6491
6492 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6493                                   struct ethtool_rxnfc *cmd)
6494 {
6495         struct hclge_vport *vport = hclge_get_vport(handle);
6496         struct hclge_fd_rule *rule = NULL;
6497         struct hclge_dev *hdev = vport->back;
6498         struct ethtool_rx_flow_spec *fs;
6499         struct hlist_node *node2;
6500
6501         if (!hnae3_dev_fd_supported(hdev))
6502                 return -EOPNOTSUPP;
6503
6504         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6505
6506         spin_lock_bh(&hdev->fd_rule_lock);
6507
6508         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6509                 if (rule->location >= fs->location)
6510                         break;
6511         }
6512
6513         if (!rule || fs->location != rule->location) {
6514                 spin_unlock_bh(&hdev->fd_rule_lock);
6515
6516                 return -ENOENT;
6517         }
6518
6519         fs->flow_type = rule->flow_type;
6520         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6521         case SCTP_V4_FLOW:
6522         case TCP_V4_FLOW:
6523         case UDP_V4_FLOW:
6524                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6525                                          &fs->m_u.tcp_ip4_spec);
6526                 break;
6527         case IP_USER_FLOW:
6528                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6529                                       &fs->m_u.usr_ip4_spec);
6530                 break;
6531         case SCTP_V6_FLOW:
6532         case TCP_V6_FLOW:
6533         case UDP_V6_FLOW:
6534                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6535                                          &fs->m_u.tcp_ip6_spec);
6536                 break;
6537         case IPV6_USER_FLOW:
6538                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6539                                       &fs->m_u.usr_ip6_spec);
6540                 break;
6541         /* The flow type of the fd rule has been checked before it was added to
6542          * the rule list. As the other flow types have been handled above, the
6543          * default case must be ETHER_FLOW.
6544          */
6545         default:
6546                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6547                                         &fs->m_u.ether_spec);
6548                 break;
6549         }
6550
6551         hclge_fd_get_ext_info(fs, rule);
6552
6553         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6554                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6555         } else {
6556                 u64 vf_id;
6557
6558                 fs->ring_cookie = rule->queue_id;
6559                 vf_id = rule->vf_id;
6560                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6561                 fs->ring_cookie |= vf_id;
6562         }
6563
6564         spin_unlock_bh(&hdev->fd_rule_lock);
6565
6566         return 0;
6567 }
6568
6569 static int hclge_get_all_rules(struct hnae3_handle *handle,
6570                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6571 {
6572         struct hclge_vport *vport = hclge_get_vport(handle);
6573         struct hclge_dev *hdev = vport->back;
6574         struct hclge_fd_rule *rule;
6575         struct hlist_node *node2;
6576         int cnt = 0;
6577
6578         if (!hnae3_dev_fd_supported(hdev))
6579                 return -EOPNOTSUPP;
6580
6581         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6582
6583         spin_lock_bh(&hdev->fd_rule_lock);
6584         hlist_for_each_entry_safe(rule, node2,
6585                                   &hdev->fd_rule_list, rule_node) {
6586                 if (cnt == cmd->rule_cnt) {
6587                         spin_unlock_bh(&hdev->fd_rule_lock);
6588                         return -EMSGSIZE;
6589                 }
6590
6591                 rule_locs[cnt] = rule->location;
6592                 cnt++;
6593         }
6594
6595         spin_unlock_bh(&hdev->fd_rule_lock);
6596
6597         cmd->rule_cnt = cnt;
6598
6599         return 0;
6600 }
6601
6602 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6603                                      struct hclge_fd_rule_tuples *tuples)
6604 {
6605 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6606 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6607
6608         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6609         tuples->ip_proto = fkeys->basic.ip_proto;
6610         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6611
6612         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6613                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6614                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6615         } else {
6616                 int i;
6617
6618                 for (i = 0; i < IPV6_SIZE; i++) {
6619                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6620                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6621                 }
6622         }
6623 }
6624
6625 /* traverse all rules and check whether an existing rule has the same tuples */
6626 static struct hclge_fd_rule *
6627 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6628                           const struct hclge_fd_rule_tuples *tuples)
6629 {
6630         struct hclge_fd_rule *rule = NULL;
6631         struct hlist_node *node;
6632
6633         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6634                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6635                         return rule;
6636         }
6637
6638         return NULL;
6639 }
6640
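/* aRFS rules only match on ether proto, ip proto, src/dst ip and dst port
 * (the tuples filled in by hclge_fd_get_flow_tuples()); all the other
 * tuple fields are marked unused here.
 */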
6641 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6642                                      struct hclge_fd_rule *rule)
6643 {
6644         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6645                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6646                              BIT(INNER_SRC_PORT);
6647         rule->action = 0;
6648         rule->vf_id = 0;
6649         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6650         if (tuples->ether_proto == ETH_P_IP) {
6651                 if (tuples->ip_proto == IPPROTO_TCP)
6652                         rule->flow_type = TCP_V4_FLOW;
6653                 else
6654                         rule->flow_type = UDP_V4_FLOW;
6655         } else {
6656                 if (tuples->ip_proto == IPPROTO_TCP)
6657                         rule->flow_type = TCP_V6_FLOW;
6658                 else
6659                         rule->flow_type = UDP_V6_FLOW;
6660         }
6661         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6662         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6663 }
6664
6665 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6666                                       u16 flow_id, struct flow_keys *fkeys)
6667 {
6668         struct hclge_vport *vport = hclge_get_vport(handle);
6669         struct hclge_fd_rule_tuples new_tuples = {};
6670         struct hclge_dev *hdev = vport->back;
6671         struct hclge_fd_rule *rule;
6672         u16 tmp_queue_id;
6673         u16 bit_id;
6674         int ret;
6675
6676         if (!hnae3_dev_fd_supported(hdev))
6677                 return -EOPNOTSUPP;
6678
6679         /* when an fd rule added by the user already exists,
6680          * arfs should not work
6681          */
6682         spin_lock_bh(&hdev->fd_rule_lock);
6683         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6684             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6685                 spin_unlock_bh(&hdev->fd_rule_lock);
6686                 return -EOPNOTSUPP;
6687         }
6688
6689         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6690
6691         /* check whether a flow director filter already exists for this flow;
6692          * if not, create a new filter for it;
6693          * if a filter exists with a different queue id, modify the filter;
6694          * if a filter exists with the same queue id, do nothing
6695          */
6696         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6697         if (!rule) {
6698                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6699                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6700                         spin_unlock_bh(&hdev->fd_rule_lock);
6701                         return -ENOSPC;
6702                 }
6703
6704                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6705                 if (!rule) {
6706                         spin_unlock_bh(&hdev->fd_rule_lock);
6707                         return -ENOMEM;
6708                 }
6709
6710                 set_bit(bit_id, hdev->fd_bmap);
6711                 rule->location = bit_id;
6712                 rule->arfs.flow_id = flow_id;
6713                 rule->queue_id = queue_id;
6714                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6715                 ret = hclge_fd_config_rule(hdev, rule);
6716
6717                 spin_unlock_bh(&hdev->fd_rule_lock);
6718
6719                 if (ret)
6720                         return ret;
6721
6722                 return rule->location;
6723         }
6724
6725         spin_unlock_bh(&hdev->fd_rule_lock);
6726
6727         if (rule->queue_id == queue_id)
6728                 return rule->location;
6729
6730         tmp_queue_id = rule->queue_id;
6731         rule->queue_id = queue_id;
6732         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6733         if (ret) {
6734                 rule->queue_id = tmp_queue_id;
6735                 return ret;
6736         }
6737
6738         return rule->location;
6739 }
6740
6741 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6742 {
6743 #ifdef CONFIG_RFS_ACCEL
6744         struct hnae3_handle *handle = &hdev->vport[0].nic;
6745         struct hclge_fd_rule *rule;
6746         struct hlist_node *node;
6747         HLIST_HEAD(del_list);
6748
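        /* expired rules are first unlinked into del_list under fd_rule_lock;
         * their TCAM entries are then cleared and the memory freed outside
         * the lock
         */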
6749         spin_lock_bh(&hdev->fd_rule_lock);
6750         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6751                 spin_unlock_bh(&hdev->fd_rule_lock);
6752                 return;
6753         }
6754         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6755                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6756                                         rule->arfs.flow_id, rule->location)) {
6757                         hlist_del_init(&rule->rule_node);
6758                         hlist_add_head(&rule->rule_node, &del_list);
6759                         hdev->hclge_fd_rule_num--;
6760                         clear_bit(rule->location, hdev->fd_bmap);
6761                 }
6762         }
6763         spin_unlock_bh(&hdev->fd_rule_lock);
6764
6765         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6766                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6767                                      rule->location, NULL, false);
6768                 kfree(rule);
6769         }
6770 #endif
6771 }
6772
6773 /* must be called with fd_rule_lock held */
6774 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6775 {
6776 #ifdef CONFIG_RFS_ACCEL
6777         struct hclge_vport *vport = hclge_get_vport(handle);
6778         struct hclge_dev *hdev = vport->back;
6779
6780         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6781                 hclge_del_all_fd_entries(handle, true);
6782 #endif
6783 }
6784
6785 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6786                                     struct hclge_fd_rule *rule)
6787 {
6788         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6789                 struct flow_match_basic match;
6790                 u16 ethtype_key, ethtype_mask;
6791
6792                 flow_rule_match_basic(flow, &match);
6793                 ethtype_key = ntohs(match.key->n_proto);
6794                 ethtype_mask = ntohs(match.mask->n_proto);
6795
6796                 if (ethtype_key == ETH_P_ALL) {
6797                         ethtype_key = 0;
6798                         ethtype_mask = 0;
6799                 }
6800                 rule->tuples.ether_proto = ethtype_key;
6801                 rule->tuples_mask.ether_proto = ethtype_mask;
6802                 rule->tuples.ip_proto = match.key->ip_proto;
6803                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6804         } else {
6805                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6806                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6807         }
6808 }
6809
6810 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6811                                   struct hclge_fd_rule *rule)
6812 {
6813         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6814                 struct flow_match_eth_addrs match;
6815
6816                 flow_rule_match_eth_addrs(flow, &match);
6817                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6818                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6819                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6820                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6821         } else {
6822                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6823                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6824         }
6825 }
6826
6827 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6828                                    struct hclge_fd_rule *rule)
6829 {
6830         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6831                 struct flow_match_vlan match;
6832
6833                 flow_rule_match_vlan(flow, &match);
6834                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6835                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6836                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6837                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6838         } else {
6839                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6840         }
6841 }
6842
6843 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6844                                  struct hclge_fd_rule *rule)
6845 {
6846         u16 addr_type = 0;
6847
6848         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6849                 struct flow_match_control match;
6850
6851                 flow_rule_match_control(flow, &match);
6852                 addr_type = match.key->addr_type;
6853         }
6854
6855         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6856                 struct flow_match_ipv4_addrs match;
6857
6858                 flow_rule_match_ipv4_addrs(flow, &match);
6859                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6860                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6861                                                 be32_to_cpu(match.mask->src);
6862                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6863                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6864                                                 be32_to_cpu(match.mask->dst);
6865         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6866                 struct flow_match_ipv6_addrs match;
6867
6868                 flow_rule_match_ipv6_addrs(flow, &match);
6869                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6870                                   IPV6_SIZE);
6871                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6872                                   match.mask->src.s6_addr32, IPV6_SIZE);
6873                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6874                                   IPV6_SIZE);
6875                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6876                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6877         } else {
6878                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6879                 rule->unused_tuple |= BIT(INNER_DST_IP);
6880         }
6881 }
6882
6883 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6884                                    struct hclge_fd_rule *rule)
6885 {
6886         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6887                 struct flow_match_ports match;
6888
6889                 flow_rule_match_ports(flow, &match);
6890
6891                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6892                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6893                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6894                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6895         } else {
6896                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6897                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6898         }
6899 }
6900
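/* Convert a tc flower classifier into flow director rule tuples. Only the
 * control, basic, Ethernet address, VLAN, IPv4/IPv6 address and ports
 * dissector keys are supported; any other key in the match makes the
 * offload request fail with -EOPNOTSUPP.
 */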
6901 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6902                                   struct flow_cls_offload *cls_flower,
6903                                   struct hclge_fd_rule *rule)
6904 {
6905         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6906         struct flow_dissector *dissector = flow->match.dissector;
6907
6908         if (dissector->used_keys &
6909             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6910               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6911               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6912               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6913               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6914               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6915               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6916                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6917                         dissector->used_keys);
6918                 return -EOPNOTSUPP;
6919         }
6920
6921         hclge_get_cls_key_basic(flow, rule);
6922         hclge_get_cls_key_mac(flow, rule);
6923         hclge_get_cls_key_vlan(flow, rule);
6924         hclge_get_cls_key_ip(flow, rule);
6925         hclge_get_cls_key_port(flow, rule);
6926
6927         return 0;
6928 }
6929
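/* Validate the target tc and the rule priority. The flower prio is reused
 * as the flow director rule location (prio - 1), so it must lie within the
 * stage-1 rule count and must not collide with an already used location.
 */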
6930 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6931                                   struct flow_cls_offload *cls_flower, int tc)
6932 {
6933         u32 prio = cls_flower->common.prio;
6934
6935         if (tc < 0 || tc > hdev->tc_max) {
6936                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6937                 return -EINVAL;
6938         }
6939
6940         if (prio == 0 ||
6941             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6942                 dev_err(&hdev->pdev->dev,
6943                         "prio %u should be in range[1, %u]\n",
6944                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6945                 return -EINVAL;
6946         }
6947
6948         if (test_bit(prio - 1, hdev->fd_bmap)) {
6949                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6950                 return -EINVAL;
6951         }
6952         return 0;
6953 }
6954
6955 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6956                                 struct flow_cls_offload *cls_flower,
6957                                 int tc)
6958 {
6959         struct hclge_vport *vport = hclge_get_vport(handle);
6960         struct hclge_dev *hdev = vport->back;
6961         struct hclge_fd_rule *rule;
6962         int ret;
6963
6964         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6965                 dev_err(&hdev->pdev->dev,
6966                         "please remove all existing fd rules via ethtool first\n");
6967                 return -EINVAL;
6968         }
6969
6970         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6971         if (ret) {
6972                 dev_err(&hdev->pdev->dev,
6973                         "failed to check cls flower params, ret = %d\n", ret);
6974                 return ret;
6975         }
6976
6977         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6978         if (!rule)
6979                 return -ENOMEM;
6980
6981         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6982         if (ret)
6983                 goto err;
6984
6985         rule->action = HCLGE_FD_ACTION_SELECT_TC;
6986         rule->cls_flower.tc = tc;
6987         rule->location = cls_flower->common.prio - 1;
6988         rule->vf_id = 0;
6989         rule->cls_flower.cookie = cls_flower->cookie;
6990         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6991
6992         spin_lock_bh(&hdev->fd_rule_lock);
6993         hclge_clear_arfs_rules(handle);
6994
6995         ret = hclge_fd_config_rule(hdev, rule);
6996
6997         spin_unlock_bh(&hdev->fd_rule_lock);
6998
6999         if (ret) {
7000                 dev_err(&hdev->pdev->dev,
7001                         "failed to add cls flower rule, ret = %d\n", ret);
7002                 goto err;
7003         }
7004
7005         return 0;
7006 err:
7007         kfree(rule);
7008         return ret;
7009 }
7010
7011 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7012                                                    unsigned long cookie)
7013 {
7014         struct hclge_fd_rule *rule;
7015         struct hlist_node *node;
7016
7017         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7018                 if (rule->cls_flower.cookie == cookie)
7019                         return rule;
7020         }
7021
7022         return NULL;
7023 }
7024
7025 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7026                                 struct flow_cls_offload *cls_flower)
7027 {
7028         struct hclge_vport *vport = hclge_get_vport(handle);
7029         struct hclge_dev *hdev = vport->back;
7030         struct hclge_fd_rule *rule;
7031         int ret;
7032
7033         spin_lock_bh(&hdev->fd_rule_lock);
7034
7035         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7036         if (!rule) {
7037                 spin_unlock_bh(&hdev->fd_rule_lock);
7038                 return -EINVAL;
7039         }
7040
7041         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7042                                    NULL, false);
7043         if (ret) {
7044                 dev_err(&hdev->pdev->dev,
7045                         "failed to delete cls flower rule %u, ret = %d\n",
7046                         rule->location, ret);
7047                 spin_unlock_bh(&hdev->fd_rule_lock);
7048                 return ret;
7049         }
7050
7051         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
7052         if (ret) {
7053                 dev_err(&hdev->pdev->dev,
7054                         "failed to delete cls flower rule %u in list, ret = %d\n",
7055                         rule->location, ret);
7056                 spin_unlock_bh(&hdev->fd_rule_lock);
7057                 return ret;
7058         }
7059
7060         spin_unlock_bh(&hdev->fd_rule_lock);
7061
7062         return 0;
7063 }
7064
7065 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7066 {
7067         struct hclge_vport *vport = hclge_get_vport(handle);
7068         struct hclge_dev *hdev = vport->back;
7069
7070         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7071                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7072 }
7073
7074 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7075 {
7076         struct hclge_vport *vport = hclge_get_vport(handle);
7077         struct hclge_dev *hdev = vport->back;
7078
7079         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7080 }
7081
7082 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7083 {
7084         struct hclge_vport *vport = hclge_get_vport(handle);
7085         struct hclge_dev *hdev = vport->back;
7086
7087         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7088 }
7089
7090 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7091 {
7092         struct hclge_vport *vport = hclge_get_vport(handle);
7093         struct hclge_dev *hdev = vport->back;
7094
7095         return hdev->rst_stats.hw_reset_done_cnt;
7096 }
7097
7098 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7099 {
7100         struct hclge_vport *vport = hclge_get_vport(handle);
7101         struct hclge_dev *hdev = vport->back;
7102         bool clear;
7103
7104         hdev->fd_en = enable;
7105         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7106
7107         if (!enable) {
7108                 spin_lock_bh(&hdev->fd_rule_lock);
7109                 hclge_del_all_fd_entries(handle, clear);
7110                 spin_unlock_bh(&hdev->fd_rule_lock);
7111         } else {
7112                 hclge_restore_fd_entries(handle);
7113         }
7114 }
7115
7116 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7117 {
7118         struct hclge_desc desc;
7119         struct hclge_config_mac_mode_cmd *req =
7120                 (struct hclge_config_mac_mode_cmd *)desc.data;
7121         u32 loop_en = 0;
7122         int ret;
7123
7124         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7125
7126         if (enable) {
7127                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7128                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7129                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7130                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7131                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7132                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7133                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7134                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7135                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7136                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7137         }
7138
7139         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7140
7141         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7142         if (ret)
7143                 dev_err(&hdev->pdev->dev,
7144                         "mac enable fail, ret =%d.\n", ret);
7145 }
7146
7147 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7148                                      u8 switch_param, u8 param_mask)
7149 {
7150         struct hclge_mac_vlan_switch_cmd *req;
7151         struct hclge_desc desc;
7152         u32 func_id;
7153         int ret;
7154
7155         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7156         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7157
7158         /* read current config parameter */
7159         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7160                                    true);
7161         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7162         req->func_id = cpu_to_le32(func_id);
7163
7164         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165         if (ret) {
7166                 dev_err(&hdev->pdev->dev,
7167                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7168                 return ret;
7169         }
7170
7171         /* modify and write new config parameter */
7172         hclge_cmd_reuse_desc(&desc, false);
7173         req->switch_param = (req->switch_param & param_mask) | switch_param;
7174         req->param_mask = param_mask;
7175
7176         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7177         if (ret)
7178                 dev_err(&hdev->pdev->dev,
7179                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7180         return ret;
7181 }
7182
7183 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7184                                        int link_ret)
7185 {
7186 #define HCLGE_PHY_LINK_STATUS_NUM  200
7187
7188         struct phy_device *phydev = hdev->hw.mac.phydev;
7189         int i = 0;
7190         int ret;
7191
7192         do {
7193                 ret = phy_read_status(phydev);
7194                 if (ret) {
7195                         dev_err(&hdev->pdev->dev,
7196                                 "phy update link status fail, ret = %d\n", ret);
7197                         return;
7198                 }
7199
7200                 if (phydev->link == link_ret)
7201                         break;
7202
7203                 msleep(HCLGE_LINK_STATUS_MS);
7204         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7205 }
7206
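/* Poll the MAC link status until it matches the expected state (link_ret).
 * The loop checks every HCLGE_LINK_STATUS_MS (10 ms), up to
 * HCLGE_MAC_LINK_STATUS_NUM (100) times, roughly one second in total, and
 * returns -EBUSY if the expected state is never reached.
 */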
7207 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7208 {
7209 #define HCLGE_MAC_LINK_STATUS_NUM  100
7210
7211         int link_status;
7212         int i = 0;
7213         int ret;
7214
7215         do {
7216                 ret = hclge_get_mac_link_status(hdev, &link_status);
7217                 if (ret)
7218                         return ret;
7219                 if (link_status == link_ret)
7220                         return 0;
7221
7222                 msleep(HCLGE_LINK_STATUS_MS);
7223         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7224         return -EBUSY;
7225 }
7226
7227 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7228                                           bool is_phy)
7229 {
7230         int link_ret;
7231
7232         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7233
7234         if (is_phy)
7235                 hclge_phy_link_status_wait(hdev, link_ret);
7236
7237         return hclge_mac_link_status_wait(hdev, link_ret);
7238 }
7239
7240 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7241 {
7242         struct hclge_config_mac_mode_cmd *req;
7243         struct hclge_desc desc;
7244         u32 loop_en;
7245         int ret;
7246
7247         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7248         /* 1 Read out the MAC mode config at first */
7249         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7250         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7251         if (ret) {
7252                 dev_err(&hdev->pdev->dev,
7253                         "mac loopback get fail, ret =%d.\n", ret);
7254                 return ret;
7255         }
7256
7257         /* 2 Then setup the loopback flag */
7258         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7259         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7260
7261         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7262
7263         /* 3 Config mac work mode with loopback flag
7264          * and its original configuration parameters
7265          */
7266         hclge_cmd_reuse_desc(&desc, false);
7267         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7268         if (ret)
7269                 dev_err(&hdev->pdev->dev,
7270                         "mac loopback set fail, ret =%d.\n", ret);
7271         return ret;
7272 }
7273
7274 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7275                                      enum hnae3_loop loop_mode)
7276 {
7277 #define HCLGE_COMMON_LB_RETRY_MS        10
7278 #define HCLGE_COMMON_LB_RETRY_NUM       100
7279
7280         struct hclge_common_lb_cmd *req;
7281         struct hclge_desc desc;
7282         int ret, i = 0;
7283         u8 loop_mode_b;
7284
7285         req = (struct hclge_common_lb_cmd *)desc.data;
7286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7287
7288         switch (loop_mode) {
7289         case HNAE3_LOOP_SERIAL_SERDES:
7290                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7291                 break;
7292         case HNAE3_LOOP_PARALLEL_SERDES:
7293                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7294                 break;
7295         case HNAE3_LOOP_PHY:
7296                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7297                 break;
7298         default:
7299                 dev_err(&hdev->pdev->dev,
7300                         "unsupported common loopback mode %d\n", loop_mode);
7301                 return -ENOTSUPP;
7302         }
7303
7304         if (en) {
7305                 req->enable = loop_mode_b;
7306                 req->mask = loop_mode_b;
7307         } else {
7308                 req->mask = loop_mode_b;
7309         }
7310
7311         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7312         if (ret) {
7313                 dev_err(&hdev->pdev->dev,
7314                         "common loopback set fail, ret = %d\n", ret);
7315                 return ret;
7316         }
7317
7318         do {
7319                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7320                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7321                                            true);
7322                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7323                 if (ret) {
7324                         dev_err(&hdev->pdev->dev,
7325                                 "common loopback get fail, ret = %d\n");
7326                         return ret;
7327                 }
7328         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7329                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7330
7331         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7332                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7333                 return -EBUSY;
7334         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7335                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7336                 return -EIO;
7337         }
7338         return ret;
7339 }
7340
7341 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7342                                      enum hnae3_loop loop_mode)
7343 {
7344         int ret;
7345
7346         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7347         if (ret)
7348                 return ret;
7349
7350         hclge_cfg_mac_mode(hdev, en);
7351
7352         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7353         if (ret)
7354                 dev_err(&hdev->pdev->dev,
7355                         "serdes loopback config mac mode timeout\n");
7356
7357         return ret;
7358 }
7359
7360 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7361                                      struct phy_device *phydev)
7362 {
7363         int ret;
7364
7365         if (!phydev->suspended) {
7366                 ret = phy_suspend(phydev);
7367                 if (ret)
7368                         return ret;
7369         }
7370
7371         ret = phy_resume(phydev);
7372         if (ret)
7373                 return ret;
7374
7375         return phy_loopback(phydev, true);
7376 }
7377
7378 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7379                                       struct phy_device *phydev)
7380 {
7381         int ret;
7382
7383         ret = phy_loopback(phydev, false);
7384         if (ret)
7385                 return ret;
7386
7387         return phy_suspend(phydev);
7388 }
7389
7390 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7391 {
7392         struct phy_device *phydev = hdev->hw.mac.phydev;
7393         int ret;
7394
7395         if (!phydev) {
7396                 if (hnae3_dev_phy_imp_supported(hdev))
7397                         return hclge_set_common_loopback(hdev, en,
7398                                                          HNAE3_LOOP_PHY);
7399                 return -ENOTSUPP;
7400         }
7401
7402         if (en)
7403                 ret = hclge_enable_phy_loopback(hdev, phydev);
7404         else
7405                 ret = hclge_disable_phy_loopback(hdev, phydev);
7406         if (ret) {
7407                 dev_err(&hdev->pdev->dev,
7408                         "set phy loopback fail, ret = %d\n", ret);
7409                 return ret;
7410         }
7411
7412         hclge_cfg_mac_mode(hdev, en);
7413
7414         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7415         if (ret)
7416                 dev_err(&hdev->pdev->dev,
7417                         "phy loopback config mac mode timeout\n");
7418
7419         return ret;
7420 }
7421
7422 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7423                             int stream_id, bool enable)
7424 {
7425         struct hclge_desc desc;
7426         struct hclge_cfg_com_tqp_queue_cmd *req =
7427                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7428         int ret;
7429
7430         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7431         req->tqp_id = cpu_to_le16(tqp_id);
7432         req->stream_id = cpu_to_le16(stream_id);
7433         if (enable)
7434                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7435
7436         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7437         if (ret)
7438                 dev_err(&hdev->pdev->dev,
7439                         "Tqp enable fail, status =%d.\n", ret);
7440         return ret;
7441 }
7442
7443 static int hclge_set_loopback(struct hnae3_handle *handle,
7444                               enum hnae3_loop loop_mode, bool en)
7445 {
7446         struct hclge_vport *vport = hclge_get_vport(handle);
7447         struct hnae3_knic_private_info *kinfo;
7448         struct hclge_dev *hdev = vport->back;
7449         int i, ret;
7450
7451         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7452          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7453          * the same, the packets are looped back in the SSU. If SSU loopback
7454          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7455          */
7456         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7457                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7458
7459                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7460                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7461                 if (ret)
7462                         return ret;
7463         }
7464
7465         switch (loop_mode) {
7466         case HNAE3_LOOP_APP:
7467                 ret = hclge_set_app_loopback(hdev, en);
7468                 break;
7469         case HNAE3_LOOP_SERIAL_SERDES:
7470         case HNAE3_LOOP_PARALLEL_SERDES:
7471                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7472                 break;
7473         case HNAE3_LOOP_PHY:
7474                 ret = hclge_set_phy_loopback(hdev, en);
7475                 break;
7476         default:
7477                 ret = -ENOTSUPP;
7478                 dev_err(&hdev->pdev->dev,
7479                         "loop_mode %d is not supported\n", loop_mode);
7480                 break;
7481         }
7482
7483         if (ret)
7484                 return ret;
7485
7486         kinfo = &vport->nic.kinfo;
7487         for (i = 0; i < kinfo->num_tqps; i++) {
7488                 ret = hclge_tqp_enable(hdev, i, 0, en);
7489                 if (ret)
7490                         return ret;
7491         }
7492
7493         return 0;
7494 }
7495
7496 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7497 {
7498         int ret;
7499
7500         ret = hclge_set_app_loopback(hdev, false);
7501         if (ret)
7502                 return ret;
7503
7504         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7505         if (ret)
7506                 return ret;
7507
7508         return hclge_cfg_common_loopback(hdev, false,
7509                                          HNAE3_LOOP_PARALLEL_SERDES);
7510 }
7511
7512 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7513 {
7514         struct hclge_vport *vport = hclge_get_vport(handle);
7515         struct hnae3_knic_private_info *kinfo;
7516         struct hnae3_queue *queue;
7517         struct hclge_tqp *tqp;
7518         int i;
7519
7520         kinfo = &vport->nic.kinfo;
7521         for (i = 0; i < kinfo->num_tqps; i++) {
7522                 queue = handle->kinfo.tqp[i];
7523                 tqp = container_of(queue, struct hclge_tqp, q);
7524                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7525         }
7526 }
7527
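/* Busy-wait until any in-flight link update in the service task has
 * finished: the loop spins while HCLGE_STATE_LINK_UPDATING is set, no new
 * service run has completed (serv_processed_cnt unchanged) and fewer than
 * HCLGE_FLUSH_LINK_TIMEOUT iterations have elapsed.
 */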
7528 static void hclge_flush_link_update(struct hclge_dev *hdev)
7529 {
7530 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7531
7532         unsigned long last = hdev->serv_processed_cnt;
7533         int i = 0;
7534
7535         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7536                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7537                last == hdev->serv_processed_cnt)
7538                 usleep_range(1, 1);
7539 }
7540
7541 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7542 {
7543         struct hclge_vport *vport = hclge_get_vport(handle);
7544         struct hclge_dev *hdev = vport->back;
7545
7546         if (enable) {
7547                 hclge_task_schedule(hdev, 0);
7548         } else {
7549                 /* Set the DOWN flag here to disable link updating */
7550                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7551
7552                 /* flush memory to make sure DOWN is seen by service task */
7553                 smp_mb__before_atomic();
7554                 hclge_flush_link_update(hdev);
7555         }
7556 }
7557
7558 static int hclge_ae_start(struct hnae3_handle *handle)
7559 {
7560         struct hclge_vport *vport = hclge_get_vport(handle);
7561         struct hclge_dev *hdev = vport->back;
7562
7563         /* mac enable */
7564         hclge_cfg_mac_mode(hdev, true);
7565         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7566         hdev->hw.mac.link = 0;
7567
7568         /* reset tqp stats */
7569         hclge_reset_tqp_stats(handle);
7570
7571         hclge_mac_start_phy(hdev);
7572
7573         return 0;
7574 }
7575
7576 static void hclge_ae_stop(struct hnae3_handle *handle)
7577 {
7578         struct hclge_vport *vport = hclge_get_vport(handle);
7579         struct hclge_dev *hdev = vport->back;
7580         int i;
7581
7582         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7583         spin_lock_bh(&hdev->fd_rule_lock);
7584         hclge_clear_arfs_rules(handle);
7585         spin_unlock_bh(&hdev->fd_rule_lock);
7586
7587         /* If it is not a PF reset, the firmware will disable the MAC,
7588          * so we only need to stop the PHY here.
7589          */
7590         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7591             hdev->reset_type != HNAE3_FUNC_RESET) {
7592                 hclge_mac_stop_phy(hdev);
7593                 hclge_update_link_status(hdev);
7594                 return;
7595         }
7596
7597         for (i = 0; i < handle->kinfo.num_tqps; i++)
7598                 hclge_reset_tqp(handle, i);
7599
7600         hclge_config_mac_tnl_int(hdev, false);
7601
7602         /* Mac disable */
7603         hclge_cfg_mac_mode(hdev, false);
7604
7605         hclge_mac_stop_phy(hdev);
7606
7607         /* reset tqp stats */
7608         hclge_reset_tqp_stats(handle);
7609         hclge_update_link_status(hdev);
7610 }
7611
7612 int hclge_vport_start(struct hclge_vport *vport)
7613 {
7614         struct hclge_dev *hdev = vport->back;
7615
7616         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7617         vport->last_active_jiffies = jiffies;
7618
7619         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7620                 if (vport->vport_id) {
7621                         hclge_restore_mac_table_common(vport);
7622                         hclge_restore_vport_vlan_table(vport);
7623                 } else {
7624                         hclge_restore_hw_table(hdev);
7625                 }
7626         }
7627
7628         clear_bit(vport->vport_id, hdev->vport_config_block);
7629
7630         return 0;
7631 }
7632
7633 void hclge_vport_stop(struct hclge_vport *vport)
7634 {
7635         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7636 }
7637
7638 static int hclge_client_start(struct hnae3_handle *handle)
7639 {
7640         struct hclge_vport *vport = hclge_get_vport(handle);
7641
7642         return hclge_vport_start(vport);
7643 }
7644
7645 static void hclge_client_stop(struct hnae3_handle *handle)
7646 {
7647         struct hclge_vport *vport = hclge_get_vport(handle);
7648
7649         hclge_vport_stop(vport);
7650 }
7651
7652 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7653                                          u16 cmdq_resp, u8  resp_code,
7654                                          enum hclge_mac_vlan_tbl_opcode op)
7655 {
7656         struct hclge_dev *hdev = vport->back;
7657
7658         if (cmdq_resp) {
7659                 dev_err(&hdev->pdev->dev,
7660                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7661                         cmdq_resp);
7662                 return -EIO;
7663         }
7664
7665         if (op == HCLGE_MAC_VLAN_ADD) {
7666                 if (!resp_code || resp_code == 1)
7667                         return 0;
7668                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7669                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7670                         return -ENOSPC;
7671
7672                 dev_err(&hdev->pdev->dev,
7673                         "add mac addr failed for undefined, code=%u.\n",
7674                         resp_code);
7675                 return -EIO;
7676         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7677                 if (!resp_code) {
7678                         return 0;
7679                 } else if (resp_code == 1) {
7680                         dev_dbg(&hdev->pdev->dev,
7681                                 "remove mac addr failed for miss.\n");
7682                         return -ENOENT;
7683                 }
7684
7685                 dev_err(&hdev->pdev->dev,
7686                         "remove mac addr failed for undefined, code=%u.\n",
7687                         resp_code);
7688                 return -EIO;
7689         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7690                 if (!resp_code) {
7691                         return 0;
7692                 } else if (resp_code == 1) {
7693                         dev_dbg(&hdev->pdev->dev,
7694                                 "lookup mac addr failed for miss.\n");
7695                         return -ENOENT;
7696                 }
7697
7698                 dev_err(&hdev->pdev->dev,
7699                         "lookup mac addr failed for undefined, code=%u.\n",
7700                         resp_code);
7701                 return -EIO;
7702         }
7703
7704         dev_err(&hdev->pdev->dev,
7705                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7706
7707         return -EINVAL;
7708 }
7709
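/* Set or clear the bit for a given function id in the MAC-VLAN table
 * descriptors. Function ids 0..191 live in desc[1] (32 ids per data word),
 * the remaining ids 192..255 live in desc[2]. For example, vfid 5 maps to
 * desc[1].data[0] bit 5 and vfid 200 to desc[2].data[0] bit 8.
 */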
7710 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7711 {
7712 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7713
7714         unsigned int word_num;
7715         unsigned int bit_num;
7716
7717         if (vfid > 255 || vfid < 0)
7718                 return -EIO;
7719
7720         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7721                 word_num = vfid / 32;
7722                 bit_num  = vfid % 32;
7723                 if (clr)
7724                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7725                 else
7726                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7727         } else {
7728                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7729                 bit_num  = vfid % 32;
7730                 if (clr)
7731                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7732                 else
7733                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7734         }
7735
7736         return 0;
7737 }
7738
7739 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7740 {
7741 #define HCLGE_DESC_NUMBER 3
7742 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7743         int i, j;
7744
7745         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7746                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7747                         if (desc[i].data[j])
7748                                 return false;
7749
7750         return true;
7751 }
7752
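/* Pack a MAC address into a MAC-VLAN table entry: bytes 0-3 go into
 * mac_addr_hi32 (byte 0 in the least significant position) and bytes 4-5
 * into mac_addr_lo16. For example, 00:11:22:33:44:55 becomes
 * hi32 = 0x33221100 and lo16 = 0x5544. Multicast entries additionally get
 * the entry_type and mc_mac_en bits set.
 */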
7753 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7754                                    const u8 *addr, bool is_mc)
7755 {
7756         const unsigned char *mac_addr = addr;
7757         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7758                        (mac_addr[0]) | (mac_addr[1] << 8);
7759         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7760
7761         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7762         if (is_mc) {
7763                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7764                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7765         }
7766
7767         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7768         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7769 }
7770
7771 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7772                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7773 {
7774         struct hclge_dev *hdev = vport->back;
7775         struct hclge_desc desc;
7776         u8 resp_code;
7777         u16 retval;
7778         int ret;
7779
7780         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7781
7782         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7783
7784         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7785         if (ret) {
7786                 dev_err(&hdev->pdev->dev,
7787                         "del mac addr failed for cmd_send, ret =%d.\n",
7788                         ret);
7789                 return ret;
7790         }
7791         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7792         retval = le16_to_cpu(desc.retval);
7793
7794         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7795                                              HCLGE_MAC_VLAN_REMOVE);
7796 }
7797
7798 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7799                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7800                                      struct hclge_desc *desc,
7801                                      bool is_mc)
7802 {
7803         struct hclge_dev *hdev = vport->back;
7804         u8 resp_code;
7805         u16 retval;
7806         int ret;
7807
7808         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7809         if (is_mc) {
7810                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7811                 memcpy(desc[0].data,
7812                        req,
7813                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7814                 hclge_cmd_setup_basic_desc(&desc[1],
7815                                            HCLGE_OPC_MAC_VLAN_ADD,
7816                                            true);
7817                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7818                 hclge_cmd_setup_basic_desc(&desc[2],
7819                                            HCLGE_OPC_MAC_VLAN_ADD,
7820                                            true);
7821                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7822         } else {
7823                 memcpy(desc[0].data,
7824                        req,
7825                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7826                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7827         }
7828         if (ret) {
7829                 dev_err(&hdev->pdev->dev,
7830                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7831                         ret);
7832                 return ret;
7833         }
7834         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7835         retval = le16_to_cpu(desc[0].retval);
7836
7837         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7838                                              HCLGE_MAC_VLAN_LKUP);
7839 }
7840
7841 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7842                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7843                                   struct hclge_desc *mc_desc)
7844 {
7845         struct hclge_dev *hdev = vport->back;
7846         int cfg_status;
7847         u8 resp_code;
7848         u16 retval;
7849         int ret;
7850
7851         if (!mc_desc) {
7852                 struct hclge_desc desc;
7853
7854                 hclge_cmd_setup_basic_desc(&desc,
7855                                            HCLGE_OPC_MAC_VLAN_ADD,
7856                                            false);
7857                 memcpy(desc.data, req,
7858                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7859                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7860                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7861                 retval = le16_to_cpu(desc.retval);
7862
7863                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7864                                                            resp_code,
7865                                                            HCLGE_MAC_VLAN_ADD);
7866         } else {
7867                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7868                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7869                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7870                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7871                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7872                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7873                 memcpy(mc_desc[0].data, req,
7874                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7875                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7876                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7877                 retval = le16_to_cpu(mc_desc[0].retval);
7878
7879                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7880                                                            resp_code,
7881                                                            HCLGE_MAC_VLAN_ADD);
7882         }
7883
7884         if (ret) {
7885                 dev_err(&hdev->pdev->dev,
7886                         "add mac addr failed for cmd_send, ret =%d.\n",
7887                         ret);
7888                 return ret;
7889         }
7890
7891         return cfg_status;
7892 }
7893
7894 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7895                                u16 *allocated_size)
7896 {
7897         struct hclge_umv_spc_alc_cmd *req;
7898         struct hclge_desc desc;
7899         int ret;
7900
7901         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7902         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7903
7904         req->space_size = cpu_to_le32(space_size);
7905
7906         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7907         if (ret) {
7908                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7909                         ret);
7910                 return ret;
7911         }
7912
7913         *allocated_size = le32_to_cpu(desc.data[1]);
7914
7915         return 0;
7916 }
7917
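/* Request UMV (unicast MAC VLAN) table space from firmware and split it
 * into num_alloc_vport + 1 equal shares: each vport keeps one share as its
 * private quota, and the extra share plus any remainder forms the shared
 * pool used once a vport exhausts its own quota.
 */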
7918 static int hclge_init_umv_space(struct hclge_dev *hdev)
7919 {
7920         u16 allocated_size = 0;
7921         int ret;
7922
7923         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7924         if (ret)
7925                 return ret;
7926
7927         if (allocated_size < hdev->wanted_umv_size)
7928                 dev_warn(&hdev->pdev->dev,
7929                          "failed to alloc umv space, want %u, get %u\n",
7930                          hdev->wanted_umv_size, allocated_size);
7931
7932         hdev->max_umv_size = allocated_size;
7933         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7934         hdev->share_umv_size = hdev->priv_umv_size +
7935                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7936
7937         return 0;
7938 }
7939
7940 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7941 {
7942         struct hclge_vport *vport;
7943         int i;
7944
7945         for (i = 0; i < hdev->num_alloc_vport; i++) {
7946                 vport = &hdev->vport[i];
7947                 vport->used_umv_num = 0;
7948         }
7949
7950         mutex_lock(&hdev->vport_lock);
7951         hdev->share_umv_size = hdev->priv_umv_size +
7952                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7953         mutex_unlock(&hdev->vport_lock);
7954 }
7955
7956 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7957 {
7958         struct hclge_dev *hdev = vport->back;
7959         bool is_full;
7960
7961         if (need_lock)
7962                 mutex_lock(&hdev->vport_lock);
7963
7964         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7965                    hdev->share_umv_size == 0);
7966
7967         if (need_lock)
7968                 mutex_unlock(&hdev->vport_lock);
7969
7970         return is_full;
7971 }
7972
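/* Account for one unicast MAC table entry being allocated or freed by the
 * vport. Each vport consumes its private quota (priv_umv_size) first and
 * then the shared pool (share_umv_size); freeing reverses the accounting.
 * Callers serialize this via hdev->vport_lock.
 */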
7973 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7974 {
7975         struct hclge_dev *hdev = vport->back;
7976
7977         if (is_free) {
7978                 if (vport->used_umv_num > hdev->priv_umv_size)
7979                         hdev->share_umv_size++;
7980
7981                 if (vport->used_umv_num > 0)
7982                         vport->used_umv_num--;
7983         } else {
7984                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7985                     hdev->share_umv_size > 0)
7986                         hdev->share_umv_size--;
7987                 vport->used_umv_num++;
7988         }
7989 }
7990
7991 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7992                                                   const u8 *mac_addr)
7993 {
7994         struct hclge_mac_node *mac_node, *tmp;
7995
7996         list_for_each_entry_safe(mac_node, tmp, list, node)
7997                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7998                         return mac_node;
7999
8000         return NULL;
8001 }
8002
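/* Advance an existing mac node to the requested state. A node moves
 * between three states: TO_ADD (queued for writing to hardware), ACTIVE
 * (present in hardware) and TO_DEL (queued for removal). A TO_ADD node
 * that receives a TO_DEL request is simply dropped from the list, since
 * it was never written to hardware.
 */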
8003 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8004                                   enum HCLGE_MAC_NODE_STATE state)
8005 {
8006         switch (state) {
8007         /* from set_rx_mode or tmp_add_list */
8008         case HCLGE_MAC_TO_ADD:
8009                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8010                         mac_node->state = HCLGE_MAC_ACTIVE;
8011                 break;
8012         /* only from set_rx_mode */
8013         case HCLGE_MAC_TO_DEL:
8014                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8015                         list_del(&mac_node->node);
8016                         kfree(mac_node);
8017                 } else {
8018                         mac_node->state = HCLGE_MAC_TO_DEL;
8019                 }
8020                 break;
8021         /* only from tmp_add_list, the mac_node->state won't be
8022          * ACTIVE.
8023          */
8024         case HCLGE_MAC_ACTIVE:
8025                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8026                         mac_node->state = HCLGE_MAC_ACTIVE;
8027
8028                 break;
8029         }
8030 }
8031
8032 int hclge_update_mac_list(struct hclge_vport *vport,
8033                           enum HCLGE_MAC_NODE_STATE state,
8034                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8035                           const unsigned char *addr)
8036 {
8037         struct hclge_dev *hdev = vport->back;
8038         struct hclge_mac_node *mac_node;
8039         struct list_head *list;
8040
8041         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8042                 &vport->uc_mac_list : &vport->mc_mac_list;
8043
8044         spin_lock_bh(&vport->mac_list_lock);
8045
8046         /* if the mac addr is already in the mac list, no need to add a new
8047          * one into it; just check the mac addr state and convert it to a
8048          * new state, remove it, or do nothing.
8049          */
8050         mac_node = hclge_find_mac_node(list, addr);
8051         if (mac_node) {
8052                 hclge_update_mac_node(mac_node, state);
8053                 spin_unlock_bh(&vport->mac_list_lock);
8054                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8055                 return 0;
8056         }
8057
8058         /* if this address was never added, there is no need to delete it */
8059         if (state == HCLGE_MAC_TO_DEL) {
8060                 spin_unlock_bh(&vport->mac_list_lock);
8061                 dev_err(&hdev->pdev->dev,
8062                         "failed to delete address %pM from mac list\n",
8063                         addr);
8064                 return -ENOENT;
8065         }
8066
8067         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8068         if (!mac_node) {
8069                 spin_unlock_bh(&vport->mac_list_lock);
8070                 return -ENOMEM;
8071         }
8072
8073         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8074
8075         mac_node->state = state;
8076         ether_addr_copy(mac_node->mac_addr, addr);
8077         list_add_tail(&mac_node->node, list);
8078
8079         spin_unlock_bh(&vport->mac_list_lock);
8080
8081         return 0;
8082 }
8083
8084 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8085                              const unsigned char *addr)
8086 {
8087         struct hclge_vport *vport = hclge_get_vport(handle);
8088
8089         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8090                                      addr);
8091 }
8092
8093 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8094                              const unsigned char *addr)
8095 {
8096         struct hclge_dev *hdev = vport->back;
8097         struct hclge_mac_vlan_tbl_entry_cmd req;
8098         struct hclge_desc desc;
8099         u16 egress_port = 0;
8100         int ret;
8101
8102         /* mac addr check */
8103         if (is_zero_ether_addr(addr) ||
8104             is_broadcast_ether_addr(addr) ||
8105             is_multicast_ether_addr(addr)) {
8106                 dev_err(&hdev->pdev->dev,
8107                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8108                          addr, is_zero_ether_addr(addr),
8109                          is_broadcast_ether_addr(addr),
8110                          is_multicast_ether_addr(addr));
8111                 return -EINVAL;
8112         }
8113
8114         memset(&req, 0, sizeof(req));
8115
8116         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8117                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8118
8119         req.egress_port = cpu_to_le16(egress_port);
8120
8121         hclge_prepare_mac_addr(&req, addr, false);
8122
8123         /* Lookup the mac address in the mac_vlan table, and add
8124          * it if the entry is nonexistent. Duplicate unicast entries
8125          * are not allowed in the mac vlan table.
8126          */
8127         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8128         if (ret == -ENOENT) {
8129                 mutex_lock(&hdev->vport_lock);
8130                 if (!hclge_is_umv_space_full(vport, false)) {
8131                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8132                         if (!ret)
8133                                 hclge_update_umv_space(vport, false);
8134                         mutex_unlock(&hdev->vport_lock);
8135                         return ret;
8136                 }
8137                 mutex_unlock(&hdev->vport_lock);
8138
8139                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8140                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8141                                 hdev->priv_umv_size);
8142
8143                 return -ENOSPC;
8144         }
8145
8146         /* check if we just hit the duplicate */
8147         if (!ret) {
8148                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8149                          vport->vport_id, addr);
8150                 return 0;
8151         }
8152
8153         dev_err(&hdev->pdev->dev,
8154                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8155                 addr);
8156
8157         return ret;
8158 }
8159
8160 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8161                             const unsigned char *addr)
8162 {
8163         struct hclge_vport *vport = hclge_get_vport(handle);
8164
8165         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8166                                      addr);
8167 }
8168
8169 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8170                             const unsigned char *addr)
8171 {
8172         struct hclge_dev *hdev = vport->back;
8173         struct hclge_mac_vlan_tbl_entry_cmd req;
8174         int ret;
8175
8176         /* mac addr check */
8177         if (is_zero_ether_addr(addr) ||
8178             is_broadcast_ether_addr(addr) ||
8179             is_multicast_ether_addr(addr)) {
8180                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8181                         addr);
8182                 return -EINVAL;
8183         }
8184
8185         memset(&req, 0, sizeof(req));
8186         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8187         hclge_prepare_mac_addr(&req, addr, false);
8188         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8189         if (!ret) {
8190                 mutex_lock(&hdev->vport_lock);
8191                 hclge_update_umv_space(vport, true);
8192                 mutex_unlock(&hdev->vport_lock);
8193         } else if (ret == -ENOENT) {
8194                 ret = 0;
8195         }
8196
8197         return ret;
8198 }
8199
8200 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8201                              const unsigned char *addr)
8202 {
8203         struct hclge_vport *vport = hclge_get_vport(handle);
8204
8205         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8206                                      addr);
8207 }
8208
8209 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8210                              const unsigned char *addr)
8211 {
8212         struct hclge_dev *hdev = vport->back;
8213         struct hclge_mac_vlan_tbl_entry_cmd req;
8214         struct hclge_desc desc[3];
8215         int status;
8216
8217         /* mac addr check */
8218         if (!is_multicast_ether_addr(addr)) {
8219                 dev_err(&hdev->pdev->dev,
8220                         "Add mc mac err! invalid mac:%pM.\n",
8221                          addr);
8222                 return -EINVAL;
8223         }
8224         memset(&req, 0, sizeof(req));
8225         hclge_prepare_mac_addr(&req, addr, true);
8226         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8227         if (status) {
8228                 /* This mac addr does not exist; add a new entry for it */
8229                 memset(desc[0].data, 0, sizeof(desc[0].data));
8230                 memset(desc[1].data, 0, sizeof(desc[0].data));
8231                 memset(desc[2].data, 0, sizeof(desc[0].data));
8232         }
8233         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8234         if (status)
8235                 return status;
8236         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8237
8238         /* if the table has already overflowed, do not print each time */
8239         if (status == -ENOSPC &&
8240             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8241                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8242
8243         return status;
8244 }
8245
8246 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8247                             const unsigned char *addr)
8248 {
8249         struct hclge_vport *vport = hclge_get_vport(handle);
8250
8251         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8252                                      addr);
8253 }
8254
8255 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8256                             const unsigned char *addr)
8257 {
8258         struct hclge_dev *hdev = vport->back;
8259         struct hclge_mac_vlan_tbl_entry_cmd req;
8260         enum hclge_cmd_status status;
8261         struct hclge_desc desc[3];
8262
8263         /* mac addr check */
8264         if (!is_multicast_ether_addr(addr)) {
8265                 dev_dbg(&hdev->pdev->dev,
8266                         "Remove mc mac err! invalid mac:%pM.\n",
8267                          addr);
8268                 return -EINVAL;
8269         }
8270
8271         memset(&req, 0, sizeof(req));
8272         hclge_prepare_mac_addr(&req, addr, true);
8273         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8274         if (!status) {
8275                 /* This mac addr exists; remove this handle's VFID from it */
8276                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8277                 if (status)
8278                         return status;
8279
8280                 if (hclge_is_all_function_id_zero(desc))
8281                         /* All the vfids are zero, so delete this entry */
8282                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8283                 else
8284                         /* Not all the vfids are zero, so just update the vfids */
8285                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8286
8287         } else if (status == -ENOENT) {
8288                 status = 0;
8289         }
8290
8291         return status;
8292 }
8293
8294 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8295                                       struct list_head *list,
8296                                       int (*sync)(struct hclge_vport *,
8297                                                   const unsigned char *))
8298 {
8299         struct hclge_mac_node *mac_node, *tmp;
8300         int ret;
8301
8302         list_for_each_entry_safe(mac_node, tmp, list, node) {
8303                 ret = sync(vport, mac_node->mac_addr);
8304                 if (!ret) {
8305                         mac_node->state = HCLGE_MAC_ACTIVE;
8306                 } else {
8307                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8308                                 &vport->state);
8309                         break;
8310                 }
8311         }
8312 }
8313
8314 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8315                                         struct list_head *list,
8316                                         int (*unsync)(struct hclge_vport *,
8317                                                       const unsigned char *))
8318 {
8319         struct hclge_mac_node *mac_node, *tmp;
8320         int ret;
8321
8322         list_for_each_entry_safe(mac_node, tmp, list, node) {
8323                 ret = unsync(vport, mac_node->mac_addr);
8324                 if (!ret || ret == -ENOENT) {
8325                         list_del(&mac_node->node);
8326                         kfree(mac_node);
8327                 } else {
8328                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8329                                 &vport->state);
8330                         break;
8331                 }
8332         }
8333 }
8334
8335 static bool hclge_sync_from_add_list(struct list_head *add_list,
8336                                      struct list_head *mac_list)
8337 {
8338         struct hclge_mac_node *mac_node, *tmp, *new_node;
8339         bool all_added = true;
8340
8341         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8342                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8343                         all_added = false;
8344
8345                 /* if the mac address from tmp_add_list is not in the
8346                  * uc/mc_mac_list, it means a TO_DEL request was received during
8347                  * the time window of adding the mac address into the mac table.
8348                  * If the mac_node state is ACTIVE, change it to TO_DEL so it
8349                  * will be removed next time; otherwise it must be TO_ADD,
8350                  * meaning this address has not been written to the mac table
8351                  * yet, so just remove the mac node.
8352                  */
8353                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8354                 if (new_node) {
8355                         hclge_update_mac_node(new_node, mac_node->state);
8356                         list_del(&mac_node->node);
8357                         kfree(mac_node);
8358                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8359                         mac_node->state = HCLGE_MAC_TO_DEL;
8360                         list_del(&mac_node->node);
8361                         list_add_tail(&mac_node->node, mac_list);
8362                 } else {
8363                         list_del(&mac_node->node);
8364                         kfree(mac_node);
8365                 }
8366         }
8367
8368         return all_added;
8369 }
8370
8371 static void hclge_sync_from_del_list(struct list_head *del_list,
8372                                      struct list_head *mac_list)
8373 {
8374         struct hclge_mac_node *mac_node, *tmp, *new_node;
8375
8376         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8377                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8378                 if (new_node) {
8379                         /* If the mac addr exists in the mac list, a new
8380                          * TO_ADD request was received during the time window
8381                          * of configuring the mac address. Since the mac node
8382                          * state is TO_ADD and the address is already in the
8383                          * hardware (because the delete failed), we just need
8384                          * to change the mac node state to ACTIVE.
8385                          */
8386                         new_node->state = HCLGE_MAC_ACTIVE;
8387                         list_del(&mac_node->node);
8388                         kfree(mac_node);
8389                 } else {
8390                         list_del(&mac_node->node);
8391                         list_add_tail(&mac_node->node, mac_list);
8392                 }
8393         }
8394 }
8395
8396 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8397                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8398                                         bool is_all_added)
8399 {
8400         if (mac_type == HCLGE_MAC_ADDR_UC) {
8401                 if (is_all_added)
8402                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8403                 else
8404                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8405         } else {
8406                 if (is_all_added)
8407                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8408                 else
8409                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8410         }
8411 }
8412
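/* MAC node state flow used by the sync below (summary of the helpers above):
 *   TO_ADD  -- add to hw ok   -->  ACTIVE
 *   TO_DEL  -- del from hw ok -->  node freed
 * A failed add/delete leaves the node on its list and sets
 * HCLGE_VPORT_STATE_MAC_TBL_CHANGE, so hclge_sync_mac_table() retries later.
 */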
8413 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8414                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8415 {
8416         struct hclge_mac_node *mac_node, *tmp, *new_node;
8417         struct list_head tmp_add_list, tmp_del_list;
8418         struct list_head *list;
8419         bool all_added;
8420
8421         INIT_LIST_HEAD(&tmp_add_list);
8422         INIT_LIST_HEAD(&tmp_del_list);
8423
8424         /* move the mac addresses to tmp_add_list and tmp_del_list, so
8425          * they can be added/deleted outside the spin lock
8426          */
8427         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8428                 &vport->uc_mac_list : &vport->mc_mac_list;
8429
8430         spin_lock_bh(&vport->mac_list_lock);
8431
8432         list_for_each_entry_safe(mac_node, tmp, list, node) {
8433                 switch (mac_node->state) {
8434                 case HCLGE_MAC_TO_DEL:
8435                         list_del(&mac_node->node);
8436                         list_add_tail(&mac_node->node, &tmp_del_list);
8437                         break;
8438                 case HCLGE_MAC_TO_ADD:
8439                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8440                         if (!new_node)
8441                                 goto stop_traverse;
8442                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8443                         new_node->state = mac_node->state;
8444                         list_add_tail(&new_node->node, &tmp_add_list);
8445                         break;
8446                 default:
8447                         break;
8448                 }
8449         }
8450
8451 stop_traverse:
8452         spin_unlock_bh(&vport->mac_list_lock);
8453
8454         /* delete first, in order to get max mac table space for adding */
8455         if (mac_type == HCLGE_MAC_ADDR_UC) {
8456                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8457                                             hclge_rm_uc_addr_common);
8458                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8459                                           hclge_add_uc_addr_common);
8460         } else {
8461                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8462                                             hclge_rm_mc_addr_common);
8463                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8464                                           hclge_add_mc_addr_common);
8465         }
8466
8467         /* if adding/deleting some mac addresses failed, move them back to
8468          * the mac_list and retry next time.
8469          */
8470         spin_lock_bh(&vport->mac_list_lock);
8471
8472         hclge_sync_from_del_list(&tmp_del_list, list);
8473         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8474
8475         spin_unlock_bh(&vport->mac_list_lock);
8476
8477         hclge_update_overflow_flags(vport, mac_type, all_added);
8478 }
8479
8480 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8481 {
8482         struct hclge_dev *hdev = vport->back;
8483
8484         if (test_bit(vport->vport_id, hdev->vport_config_block))
8485                 return false;
8486
8487         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8488                 return true;
8489
8490         return false;
8491 }
8492
8493 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8494 {
8495         int i;
8496
8497         for (i = 0; i < hdev->num_alloc_vport; i++) {
8498                 struct hclge_vport *vport = &hdev->vport[i];
8499
8500                 if (!hclge_need_sync_mac_table(vport))
8501                         continue;
8502
8503                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8504                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8505         }
8506 }
8507
8508 static void hclge_build_del_list(struct list_head *list,
8509                                  bool is_del_list,
8510                                  struct list_head *tmp_del_list)
8511 {
8512         struct hclge_mac_node *mac_cfg, *tmp;
8513
8514         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8515                 switch (mac_cfg->state) {
8516                 case HCLGE_MAC_TO_DEL:
8517                 case HCLGE_MAC_ACTIVE:
8518                         list_del(&mac_cfg->node);
8519                         list_add_tail(&mac_cfg->node, tmp_del_list);
8520                         break;
8521                 case HCLGE_MAC_TO_ADD:
8522                         if (is_del_list) {
8523                                 list_del(&mac_cfg->node);
8524                                 kfree(mac_cfg);
8525                         }
8526                         break;
8527                 }
8528         }
8529 }
8530
8531 static void hclge_unsync_del_list(struct hclge_vport *vport,
8532                                   int (*unsync)(struct hclge_vport *vport,
8533                                                 const unsigned char *addr),
8534                                   bool is_del_list,
8535                                   struct list_head *tmp_del_list)
8536 {
8537         struct hclge_mac_node *mac_cfg, *tmp;
8538         int ret;
8539
8540         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8541                 ret = unsync(vport, mac_cfg->mac_addr);
8542                 if (!ret || ret == -ENOENT) {
8543                         /* clear all mac addresses from hardware, but keep
8544                          * them in the mac list so they can be restored
8545                          * after the vf reset finishes.
8546                          */
8547                         if (!is_del_list &&
8548                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8549                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8550                         } else {
8551                                 list_del(&mac_cfg->node);
8552                                 kfree(mac_cfg);
8553                         }
8554                 } else if (is_del_list) {
8555                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8556                 }
8557         }
8558 }
8559
8560 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8561                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8562 {
8563         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8564         struct hclge_dev *hdev = vport->back;
8565         struct list_head tmp_del_list, *list;
8566
8567         if (mac_type == HCLGE_MAC_ADDR_UC) {
8568                 list = &vport->uc_mac_list;
8569                 unsync = hclge_rm_uc_addr_common;
8570         } else {
8571                 list = &vport->mc_mac_list;
8572                 unsync = hclge_rm_mc_addr_common;
8573         }
8574
8575         INIT_LIST_HEAD(&tmp_del_list);
8576
8577         if (!is_del_list)
8578                 set_bit(vport->vport_id, hdev->vport_config_block);
8579
8580         spin_lock_bh(&vport->mac_list_lock);
8581
8582         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8583
8584         spin_unlock_bh(&vport->mac_list_lock);
8585
8586         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8587
8588         spin_lock_bh(&vport->mac_list_lock);
8589
8590         hclge_sync_from_del_list(&tmp_del_list, list);
8591
8592         spin_unlock_bh(&vport->mac_list_lock);
8593 }
8594
8595 /* remove all mac addresses when uninitializing */
8596 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8597                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8598 {
8599         struct hclge_mac_node *mac_node, *tmp;
8600         struct hclge_dev *hdev = vport->back;
8601         struct list_head tmp_del_list, *list;
8602
8603         INIT_LIST_HEAD(&tmp_del_list);
8604
8605         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8606                 &vport->uc_mac_list : &vport->mc_mac_list;
8607
8608         spin_lock_bh(&vport->mac_list_lock);
8609
8610         list_for_each_entry_safe(mac_node, tmp, list, node) {
8611                 switch (mac_node->state) {
8612                 case HCLGE_MAC_TO_DEL:
8613                 case HCLGE_MAC_ACTIVE:
8614                         list_del(&mac_node->node);
8615                         list_add_tail(&mac_node->node, &tmp_del_list);
8616                         break;
8617                 case HCLGE_MAC_TO_ADD:
8618                         list_del(&mac_node->node);
8619                         kfree(mac_node);
8620                         break;
8621                 }
8622         }
8623
8624         spin_unlock_bh(&vport->mac_list_lock);
8625
8626         if (mac_type == HCLGE_MAC_ADDR_UC)
8627                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8628                                             hclge_rm_uc_addr_common);
8629         else
8630                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8631                                             hclge_rm_mc_addr_common);
8632
8633         if (!list_empty(&tmp_del_list))
8634                 dev_warn(&hdev->pdev->dev,
8635                          "failed to completely uninit %s mac list for vport %u\n",
8636                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8637                          vport->vport_id);
8638
8639         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8640                 list_del(&mac_node->node);
8641                 kfree(mac_node);
8642         }
8643 }
8644
8645 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8646 {
8647         struct hclge_vport *vport;
8648         int i;
8649
8650         for (i = 0; i < hdev->num_alloc_vport; i++) {
8651                 vport = &hdev->vport[i];
8652                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8653                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8654         }
8655 }
8656
8657 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8658                                               u16 cmdq_resp, u8 resp_code)
8659 {
8660 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8661 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8662 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8663 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8664
8665         int return_status;
8666
8667         if (cmdq_resp) {
8668                 dev_err(&hdev->pdev->dev,
8669                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8670                         cmdq_resp);
8671                 return -EIO;
8672         }
8673
8674         switch (resp_code) {
8675         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8676         case HCLGE_ETHERTYPE_ALREADY_ADD:
8677                 return_status = 0;
8678                 break;
8679         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8680                 dev_err(&hdev->pdev->dev,
8681                         "add mac ethertype failed for manager table overflow.\n");
8682                 return_status = -EIO;
8683                 break;
8684         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8685                 dev_err(&hdev->pdev->dev,
8686                         "add mac ethertype failed for key conflict.\n");
8687                 return_status = -EIO;
8688                 break;
8689         default:
8690                 dev_err(&hdev->pdev->dev,
8691                         "add mac ethertype failed for undefined, code=%u.\n",
8692                         resp_code);
8693                 return_status = -EIO;
8694         }
8695
8696         return return_status;
8697 }
8698
8699 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8700                                      u8 *mac_addr)
8701 {
8702         struct hclge_mac_vlan_tbl_entry_cmd req;
8703         struct hclge_dev *hdev = vport->back;
8704         struct hclge_desc desc;
8705         u16 egress_port = 0;
8706         int i;
8707
8708         if (is_zero_ether_addr(mac_addr))
8709                 return false;
8710
8711         memset(&req, 0, sizeof(req));
8712         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8713                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8714         req.egress_port = cpu_to_le16(egress_port);
8715         hclge_prepare_mac_addr(&req, mac_addr, false);
8716
8717         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8718                 return true;
8719
8720         vf_idx += HCLGE_VF_VPORT_START_NUM;
8721         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8722                 if (i != vf_idx &&
8723                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8724                         return true;
8725
8726         return false;
8727 }
8728
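/* Typically reached via the hns3 ndo_set_vf_mac callback, e.g. from
 * "ip link set <pf> vf <n> mac <addr>" in userspace.
 */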
8729 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8730                             u8 *mac_addr)
8731 {
8732         struct hclge_vport *vport = hclge_get_vport(handle);
8733         struct hclge_dev *hdev = vport->back;
8734
8735         vport = hclge_get_vf_vport(hdev, vf);
8736         if (!vport)
8737                 return -EINVAL;
8738
8739         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8740                 dev_info(&hdev->pdev->dev,
8741                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8742                          mac_addr);
8743                 return 0;
8744         }
8745
8746         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8747                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8748                         mac_addr);
8749                 return -EEXIST;
8750         }
8751
8752         ether_addr_copy(vport->vf_info.mac, mac_addr);
8753
8754         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8755                 dev_info(&hdev->pdev->dev,
8756                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8757                          vf, mac_addr);
8758                 return hclge_inform_reset_assert_to_vf(vport);
8759         }
8760
8761         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8762                  vf, mac_addr);
8763         return 0;
8764 }
8765
8766 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8767                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8768 {
8769         struct hclge_desc desc;
8770         u8 resp_code;
8771         u16 retval;
8772         int ret;
8773
8774         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8775         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8776
8777         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8778         if (ret) {
8779                 dev_err(&hdev->pdev->dev,
8780                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8781                         ret);
8782                 return ret;
8783         }
8784
8785         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8786         retval = le16_to_cpu(desc.retval);
8787
8788         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8789 }
8790
8791 static int init_mgr_tbl(struct hclge_dev *hdev)
8792 {
8793         int ret;
8794         int i;
8795
8796         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8797                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8798                 if (ret) {
8799                         dev_err(&hdev->pdev->dev,
8800                                 "add mac ethertype failed, ret =%d.\n",
8801                                 ret);
8802                         return ret;
8803                 }
8804         }
8805
8806         return 0;
8807 }
8808
8809 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8810 {
8811         struct hclge_vport *vport = hclge_get_vport(handle);
8812         struct hclge_dev *hdev = vport->back;
8813
8814         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8815 }
8816
8817 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8818                                        const u8 *old_addr, const u8 *new_addr)
8819 {
8820         struct list_head *list = &vport->uc_mac_list;
8821         struct hclge_mac_node *old_node, *new_node;
8822
8823         new_node = hclge_find_mac_node(list, new_addr);
8824         if (!new_node) {
8825                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8826                 if (!new_node)
8827                         return -ENOMEM;
8828
8829                 new_node->state = HCLGE_MAC_TO_ADD;
8830                 ether_addr_copy(new_node->mac_addr, new_addr);
8831                 list_add(&new_node->node, list);
8832         } else {
8833                 if (new_node->state == HCLGE_MAC_TO_DEL)
8834                         new_node->state = HCLGE_MAC_ACTIVE;
8835
8836                 /* make sure the new addr is at the list head, otherwise the
8837                  * dev addr may not be re-added into the mac table due to the
8838                  * umv space limitation after a global/imp reset, which clears
8839                  * the mac table in hardware.
8840                  */
8841                 list_move(&new_node->node, list);
8842         }
8843
8844         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8845                 old_node = hclge_find_mac_node(list, old_addr);
8846                 if (old_node) {
8847                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8848                                 list_del(&old_node->node);
8849                                 kfree(old_node);
8850                         } else {
8851                                 old_node->state = HCLGE_MAC_TO_DEL;
8852                         }
8853                 }
8854         }
8855
8856         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8857
8858         return 0;
8859 }
8860
8861 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8862                               bool is_first)
8863 {
8864         const unsigned char *new_addr = (const unsigned char *)p;
8865         struct hclge_vport *vport = hclge_get_vport(handle);
8866         struct hclge_dev *hdev = vport->back;
8867         unsigned char *old_addr = NULL;
8868         int ret;
8869
8870         /* mac addr check */
8871         if (is_zero_ether_addr(new_addr) ||
8872             is_broadcast_ether_addr(new_addr) ||
8873             is_multicast_ether_addr(new_addr)) {
8874                 dev_err(&hdev->pdev->dev,
8875                         "change uc mac err! invalid mac: %pM.\n",
8876                          new_addr);
8877                 return -EINVAL;
8878         }
8879
8880         ret = hclge_pause_addr_cfg(hdev, new_addr);
8881         if (ret) {
8882                 dev_err(&hdev->pdev->dev,
8883                         "failed to configure mac pause address, ret = %d\n",
8884                         ret);
8885                 return ret;
8886         }
8887
8888         if (!is_first)
8889                 old_addr = hdev->hw.mac.mac_addr;
8890
8891         spin_lock_bh(&vport->mac_list_lock);
8892         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8893         if (ret) {
8894                 dev_err(&hdev->pdev->dev,
8895                         "failed to change the mac addr:%pM, ret = %d\n",
8896                         new_addr, ret);
8897                 spin_unlock_bh(&vport->mac_list_lock);
8898
8899                 if (!is_first)
8900                         hclge_pause_addr_cfg(hdev, old_addr);
8901
8902                 return ret;
8903         }
8904         /* we must update the dev addr under the spin lock to prevent it from
8905          * being removed by the set_rx_mode path.
8906          */
8907         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8908         spin_unlock_bh(&vport->mac_list_lock);
8909
8910         hclge_task_schedule(hdev, 0);
8911
8912         return 0;
8913 }
8914
8915 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
8916 {
8917         struct mii_ioctl_data *data = if_mii(ifr);
8918
8919         if (!hnae3_dev_phy_imp_supported(hdev))
8920                 return -EOPNOTSUPP;
8921
8922         switch (cmd) {
8923         case SIOCGMIIPHY:
8924                 data->phy_id = hdev->hw.mac.phy_addr;
8925                 /* this command reads phy id and register at the same time */
8926                 fallthrough;
8927         case SIOCGMIIREG:
8928                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
8929                 return 0;
8930
8931         case SIOCSMIIREG:
8932                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
8933         default:
8934                 return -EOPNOTSUPP;
8935         }
8936 }
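/* Illustrative userspace sketch (not part of the driver) showing how the MII
 * ioctls handled above are usually issued; the interface name is an assumption
 * for the example (needs <sys/ioctl.h>, <sys/socket.h>, <net/if.h>,
 * <linux/mii.h>, <linux/sockios.h>):
 *
 *   struct ifreq ifr = {};
 *   struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ioctl(fd, SIOCGMIIPHY, &ifr);          // PHY id returned in mii->phy_id
 *   mii->reg_num = MII_BMSR;
 *   ioctl(fd, SIOCGMIIREG, &ifr);          // register value in mii->val_out
 */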
8937
8938 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8939                           int cmd)
8940 {
8941         struct hclge_vport *vport = hclge_get_vport(handle);
8942         struct hclge_dev *hdev = vport->back;
8943
8944         if (!hdev->hw.mac.phydev)
8945                 return hclge_mii_ioctl(hdev, ifr, cmd);
8946
8947         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8948 }
8949
8950 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8951                                       u8 fe_type, bool filter_en, u8 vf_id)
8952 {
8953         struct hclge_vlan_filter_ctrl_cmd *req;
8954         struct hclge_desc desc;
8955         int ret;
8956
8957         /* read current vlan filter parameter */
8958         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8959         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8960         req->vlan_type = vlan_type;
8961         req->vf_id = vf_id;
8962
8963         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8964         if (ret) {
8965                 dev_err(&hdev->pdev->dev,
8966                         "failed to get vlan filter config, ret = %d.\n", ret);
8967                 return ret;
8968         }
8969
8970         /* modify and write new config parameter */
8971         hclge_cmd_reuse_desc(&desc, false);
8972         req->vlan_fe = filter_en ?
8973                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8974
8975         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8976         if (ret)
8977                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8978                         ret);
8979
8980         return ret;
8981 }
8982
8983 #define HCLGE_FILTER_TYPE_VF            0
8984 #define HCLGE_FILTER_TYPE_PORT          1
8985 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8986 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8987 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8988 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8989 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8990 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8991                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8992 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8993                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8994
8995 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8996 {
8997         struct hclge_vport *vport = hclge_get_vport(handle);
8998         struct hclge_dev *hdev = vport->back;
8999
9000         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9001                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9002                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
9003                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9004                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
9005         } else {
9006                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9007                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9008                                            0);
9009         }
9010         if (enable)
9011                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9012         else
9013                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9014 }
9015
9016 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9017                                         bool is_kill, u16 vlan,
9018                                         struct hclge_desc *desc)
9019 {
9020         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9021         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9022         u8 vf_byte_val;
9023         u8 vf_byte_off;
9024         int ret;
9025
9026         hclge_cmd_setup_basic_desc(&desc[0],
9027                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9028         hclge_cmd_setup_basic_desc(&desc[1],
9029                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9030
9031         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9032
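        /* locate this VF's bit in the bitmap split across the two descriptors,
         * e.g. vfid 10 -> vf_byte_off = 1, vf_byte_val = 0x04 (bit 2 of byte 1)
         */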
9033         vf_byte_off = vfid / 8;
9034         vf_byte_val = 1 << (vfid % 8);
9035
9036         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9037         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9038
9039         req0->vlan_id  = cpu_to_le16(vlan);
9040         req0->vlan_cfg = is_kill;
9041
9042         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9043                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9044         else
9045                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9046
9047         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9048         if (ret) {
9049                 dev_err(&hdev->pdev->dev,
9050                         "Send vf vlan command fail, ret =%d.\n",
9051                         ret);
9052                 return ret;
9053         }
9054
9055         return 0;
9056 }
9057
9058 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9059                                           bool is_kill, struct hclge_desc *desc)
9060 {
9061         struct hclge_vlan_filter_vf_cfg_cmd *req;
9062
9063         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9064
9065         if (!is_kill) {
9066 #define HCLGE_VF_VLAN_NO_ENTRY  2
9067                 if (!req->resp_code || req->resp_code == 1)
9068                         return 0;
9069
9070                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9071                         set_bit(vfid, hdev->vf_vlan_full);
9072                         dev_warn(&hdev->pdev->dev,
9073                                  "vf vlan table is full, vf vlan filter is disabled\n");
9074                         return 0;
9075                 }
9076
9077                 dev_err(&hdev->pdev->dev,
9078                         "Add vf vlan filter fail, ret =%u.\n",
9079                         req->resp_code);
9080         } else {
9081 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9082                 if (!req->resp_code)
9083                         return 0;
9084
9085                 /* when the vf vlan table is full, the vf vlan filter is
9086                  * disabled and new vlan ids are not added into the table.
9087                  * Just return 0 without warning to avoid massive verbose
9088                  * logs when unloading.
9089                  */
9090                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9091                         return 0;
9092
9093                 dev_err(&hdev->pdev->dev,
9094                         "Kill vf vlan filter fail, ret =%u.\n",
9095                         req->resp_code);
9096         }
9097
9098         return -EIO;
9099 }
9100
9101 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9102                                     bool is_kill, u16 vlan,
9103                                     __be16 proto)
9104 {
9105         struct hclge_vport *vport = &hdev->vport[vfid];
9106         struct hclge_desc desc[2];
9107         int ret;
9108
9109         /* if the vf vlan table is full, firmware disables the vf vlan
9110          * filter, so adding a new vlan id is neither possible nor needed.
9111          * If spoof check is enabled and the vf vlan table is full, a new
9112          * vlan must not be added, since tx packets with it would be dropped.
9113          */
9114         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9115                 if (vport->vf_info.spoofchk && vlan) {
9116                         dev_err(&hdev->pdev->dev,
9117                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9118                         return -EPERM;
9119                 }
9120                 return 0;
9121         }
9122
9123         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9124         if (ret)
9125                 return ret;
9126
9127         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9128 }
9129
9130 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9131                                       u16 vlan_id, bool is_kill)
9132 {
9133         struct hclge_vlan_filter_pf_cfg_cmd *req;
9134         struct hclge_desc desc;
9135         u8 vlan_offset_byte_val;
9136         u8 vlan_offset_byte;
9137         u8 vlan_offset_160;
9138         int ret;
9139
9140         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9141
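        /* locate the vlan bit in the per-port bitmap; e.g. if the offset step
         * is 160 and HCLGE_VLAN_BYTE_SIZE is 8, vlan_id 300 -> vlan_offset_160
         * = 1, vlan_offset_byte = 17, vlan_offset_byte_val = BIT(4)
         */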
9142         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9143         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9144                            HCLGE_VLAN_BYTE_SIZE;
9145         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9146
9147         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9148         req->vlan_offset = vlan_offset_160;
9149         req->vlan_cfg = is_kill;
9150         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9151
9152         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9153         if (ret)
9154                 dev_err(&hdev->pdev->dev,
9155                         "port vlan command, send fail, ret =%d.\n", ret);
9156         return ret;
9157 }
9158
9159 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9160                                     u16 vport_id, u16 vlan_id,
9161                                     bool is_kill)
9162 {
9163         u16 vport_idx, vport_num = 0;
9164         int ret;
9165
9166         if (is_kill && !vlan_id)
9167                 return 0;
9168
9169         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9170                                        proto);
9171         if (ret) {
9172                 dev_err(&hdev->pdev->dev,
9173                         "Set %u vport vlan filter config fail, ret =%d.\n",
9174                         vport_id, ret);
9175                 return ret;
9176         }
9177
9178         /* vlan 0 may be added twice when 8021q module is enabled */
9179         if (!is_kill && !vlan_id &&
9180             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9181                 return 0;
9182
9183         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9184                 dev_err(&hdev->pdev->dev,
9185                         "Add port vlan failed, vport %u is already in vlan %u\n",
9186                         vport_id, vlan_id);
9187                 return -EINVAL;
9188         }
9189
9190         if (is_kill &&
9191             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9192                 dev_err(&hdev->pdev->dev,
9193                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9194                         vport_id, vlan_id);
9195                 return -EINVAL;
9196         }
9197
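        /* only touch the port vlan filter when the first vport joins or the
         * last vport leaves this vlan
         */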
9198         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9199                 vport_num++;
9200
9201         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9202                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9203                                                  is_kill);
9204
9205         return ret;
9206 }
9207
9208 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9209 {
9210         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9211         struct hclge_vport_vtag_tx_cfg_cmd *req;
9212         struct hclge_dev *hdev = vport->back;
9213         struct hclge_desc desc;
9214         u16 bmap_index;
9215         int status;
9216
9217         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9218
9219         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9220         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9221         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9222         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9223                       vcfg->accept_tag1 ? 1 : 0);
9224         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9225                       vcfg->accept_untag1 ? 1 : 0);
9226         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9227                       vcfg->accept_tag2 ? 1 : 0);
9228         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9229                       vcfg->accept_untag2 ? 1 : 0);
9230         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9231                       vcfg->insert_tag1_en ? 1 : 0);
9232         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9233                       vcfg->insert_tag2_en ? 1 : 0);
9234         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9235                       vcfg->tag_shift_mode_en ? 1 : 0);
9236         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9237
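        /* select this vport's bit in the per-command VF bitmap; e.g. with
         * HCLGE_VF_NUM_PER_BYTE of 8, vport_id 10 -> bmap_index = 1, bit 2
         */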
9238         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9239         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9240                         HCLGE_VF_NUM_PER_BYTE;
9241         req->vf_bitmap[bmap_index] =
9242                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9243
9244         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9245         if (status)
9246                 dev_err(&hdev->pdev->dev,
9247                         "Send port txvlan cfg command fail, ret =%d\n",
9248                         status);
9249
9250         return status;
9251 }
9252
9253 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9254 {
9255         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9256         struct hclge_vport_vtag_rx_cfg_cmd *req;
9257         struct hclge_dev *hdev = vport->back;
9258         struct hclge_desc desc;
9259         u16 bmap_index;
9260         int status;
9261
9262         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9263
9264         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9265         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9266                       vcfg->strip_tag1_en ? 1 : 0);
9267         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9268                       vcfg->strip_tag2_en ? 1 : 0);
9269         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9270                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9271         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9272                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9273         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9274                       vcfg->strip_tag1_discard_en ? 1 : 0);
9275         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9276                       vcfg->strip_tag2_discard_en ? 1 : 0);
9277
9278         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9279         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9280                         HCLGE_VF_NUM_PER_BYTE;
9281         req->vf_bitmap[bmap_index] =
9282                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9283
9284         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9285         if (status)
9286                 dev_err(&hdev->pdev->dev,
9287                         "Send port rxvlan cfg command fail, ret =%d\n",
9288                         status);
9289
9290         return status;
9291 }
9292
9293 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9294                                   u16 port_base_vlan_state,
9295                                   u16 vlan_tag)
9296 {
9297         int ret;
9298
9299         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9300                 vport->txvlan_cfg.accept_tag1 = true;
9301                 vport->txvlan_cfg.insert_tag1_en = false;
9302                 vport->txvlan_cfg.default_tag1 = 0;
9303         } else {
9304                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9305
9306                 vport->txvlan_cfg.accept_tag1 =
9307                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9308                 vport->txvlan_cfg.insert_tag1_en = true;
9309                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9310         }
9311
9312         vport->txvlan_cfg.accept_untag1 = true;
9313
9314         /* accept_tag2 and accept_untag2 are not supported on
9315          * pdev revision 0x20; newer revisions support them, but
9316          * these two fields cannot be configured by the user.
9317          */
9318         vport->txvlan_cfg.accept_tag2 = true;
9319         vport->txvlan_cfg.accept_untag2 = true;
9320         vport->txvlan_cfg.insert_tag2_en = false;
9321         vport->txvlan_cfg.default_tag2 = 0;
9322         vport->txvlan_cfg.tag_shift_mode_en = true;
9323
9324         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9325                 vport->rxvlan_cfg.strip_tag1_en = false;
9326                 vport->rxvlan_cfg.strip_tag2_en =
9327                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9328                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9329         } else {
9330                 vport->rxvlan_cfg.strip_tag1_en =
9331                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9332                 vport->rxvlan_cfg.strip_tag2_en = true;
9333                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9334         }
9335
9336         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9337         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9338         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9339
9340         ret = hclge_set_vlan_tx_offload_cfg(vport);
9341         if (ret)
9342                 return ret;
9343
9344         return hclge_set_vlan_rx_offload_cfg(vport);
9345 }
9346
9347 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9348 {
9349         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9350         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9351         struct hclge_desc desc;
9352         int status;
9353
9354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9355         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9356         rx_req->ot_fst_vlan_type =
9357                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9358         rx_req->ot_sec_vlan_type =
9359                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9360         rx_req->in_fst_vlan_type =
9361                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9362         rx_req->in_sec_vlan_type =
9363                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9364
9365         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9366         if (status) {
9367                 dev_err(&hdev->pdev->dev,
9368                         "Send rxvlan protocol type command fail, ret =%d\n",
9369                         status);
9370                 return status;
9371         }
9372
9373         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9374
9375         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9376         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9377         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9378
9379         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9380         if (status)
9381                 dev_err(&hdev->pdev->dev,
9382                         "Send txvlan protocol type command fail, ret =%d\n",
9383                         status);
9384
9385         return status;
9386 }
9387
9388 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9389 {
9390 #define HCLGE_DEF_VLAN_TYPE             0x8100
9391
9392         struct hnae3_handle *handle = &hdev->vport[0].nic;
9393         struct hclge_vport *vport;
9394         int ret;
9395         int i;
9396
9397         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9398                 /* for revision 0x21, vf vlan filter is per function */
9399                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9400                         vport = &hdev->vport[i];
9401                         ret = hclge_set_vlan_filter_ctrl(hdev,
9402                                                          HCLGE_FILTER_TYPE_VF,
9403                                                          HCLGE_FILTER_FE_EGRESS,
9404                                                          true,
9405                                                          vport->vport_id);
9406                         if (ret)
9407                                 return ret;
9408                 }
9409
9410                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9411                                                  HCLGE_FILTER_FE_INGRESS, true,
9412                                                  0);
9413                 if (ret)
9414                         return ret;
9415         } else {
9416                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9417                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9418                                                  true, 0);
9419                 if (ret)
9420                         return ret;
9421         }
9422
9423         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9424
9425         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9426         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9427         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9428         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9429         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9430         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9431
9432         ret = hclge_set_vlan_protocol_type(hdev);
9433         if (ret)
9434                 return ret;
9435
9436         for (i = 0; i < hdev->num_alloc_vport; i++) {
9437                 u16 vlan_tag;
9438
9439                 vport = &hdev->vport[i];
9440                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9441
9442                 ret = hclge_vlan_offload_cfg(vport,
9443                                              vport->port_base_vlan_cfg.state,
9444                                              vlan_tag);
9445                 if (ret)
9446                         return ret;
9447         }
9448
9449         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9450 }
9451
9452 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9453                                        bool writen_to_tbl)
9454 {
9455         struct hclge_vport_vlan_cfg *vlan;
9456
9457         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9458         if (!vlan)
9459                 return;
9460
9461         vlan->hd_tbl_status = writen_to_tbl;
9462         vlan->vlan_id = vlan_id;
9463
9464         list_add_tail(&vlan->node, &vport->vlan_list);
9465 }
9466
9467 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9468 {
9469         struct hclge_vport_vlan_cfg *vlan, *tmp;
9470         struct hclge_dev *hdev = vport->back;
9471         int ret;
9472
9473         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9474                 if (!vlan->hd_tbl_status) {
9475                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9476                                                        vport->vport_id,
9477                                                        vlan->vlan_id, false);
9478                         if (ret) {
9479                                 dev_err(&hdev->pdev->dev,
9480                                         "restore vport vlan list failed, ret=%d\n",
9481                                         ret);
9482                                 return ret;
9483                         }
9484                 }
9485                 vlan->hd_tbl_status = true;
9486         }
9487
9488         return 0;
9489 }
9490
9491 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9492                                       bool is_write_tbl)
9493 {
9494         struct hclge_vport_vlan_cfg *vlan, *tmp;
9495         struct hclge_dev *hdev = vport->back;
9496
9497         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9498                 if (vlan->vlan_id == vlan_id) {
9499                         if (is_write_tbl && vlan->hd_tbl_status)
9500                                 hclge_set_vlan_filter_hw(hdev,
9501                                                          htons(ETH_P_8021Q),
9502                                                          vport->vport_id,
9503                                                          vlan_id,
9504                                                          true);
9505
9506                         list_del(&vlan->node);
9507                         kfree(vlan);
9508                         break;
9509                 }
9510         }
9511 }
9512
9513 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9514 {
9515         struct hclge_vport_vlan_cfg *vlan, *tmp;
9516         struct hclge_dev *hdev = vport->back;
9517
9518         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9519                 if (vlan->hd_tbl_status)
9520                         hclge_set_vlan_filter_hw(hdev,
9521                                                  htons(ETH_P_8021Q),
9522                                                  vport->vport_id,
9523                                                  vlan->vlan_id,
9524                                                  true);
9525
9526                 vlan->hd_tbl_status = false;
9527                 if (is_del_list) {
9528                         list_del(&vlan->node);
9529                         kfree(vlan);
9530                 }
9531         }
9532         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9533 }
9534
9535 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9536 {
9537         struct hclge_vport_vlan_cfg *vlan, *tmp;
9538         struct hclge_vport *vport;
9539         int i;
9540
9541         for (i = 0; i < hdev->num_alloc_vport; i++) {
9542                 vport = &hdev->vport[i];
9543                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9544                         list_del(&vlan->node);
9545                         kfree(vlan);
9546                 }
9547         }
9548 }
9549
9550 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9551 {
9552         struct hclge_vport_vlan_cfg *vlan, *tmp;
9553         struct hclge_dev *hdev = vport->back;
9554         u16 vlan_proto;
9555         u16 vlan_id;
9556         u16 state;
9557         int ret;
9558
9559         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9560         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9561         state = vport->port_base_vlan_cfg.state;
9562
9563         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9564                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9565                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9566                                          vport->vport_id, vlan_id,
9567                                          false);
9568                 return;
9569         }
9570
9571         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9572                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9573                                                vport->vport_id,
9574                                                vlan->vlan_id, false);
9575                 if (ret)
9576                         break;
9577                 vlan->hd_tbl_status = true;
9578         }
9579 }
9580
9581 /* For global reset and imp reset, hardware will clear the mac table,
9582  * so we change the mac address state from ACTIVE to TO_ADD so that it
9583  * can be restored in the service task after the reset completes. Further,
9584  * mac addresses with state TO_DEL or DEL_FAIL do not need to be
9585  * restored after reset, so just remove these mac nodes from mac_list.
9586  */
9587 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9588 {
9589         struct hclge_mac_node *mac_node, *tmp;
9590
9591         list_for_each_entry_safe(mac_node, tmp, list, node) {
9592                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9593                         mac_node->state = HCLGE_MAC_TO_ADD;
9594                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9595                         list_del(&mac_node->node);
9596                         kfree(mac_node);
9597                 }
9598         }
9599 }
9600
9601 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9602 {
9603         spin_lock_bh(&vport->mac_list_lock);
9604
9605         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9606         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9607         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9608
9609         spin_unlock_bh(&vport->mac_list_lock);
9610 }
9611
9612 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9613 {
9614         struct hclge_vport *vport = &hdev->vport[0];
9615         struct hnae3_handle *handle = &vport->nic;
9616
9617         hclge_restore_mac_table_common(vport);
9618         hclge_restore_vport_vlan_table(vport);
9619         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9620
9621         hclge_restore_fd_entries(handle);
9622 }
9623
9624 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9625 {
9626         struct hclge_vport *vport = hclge_get_vport(handle);
9627
9628         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9629                 vport->rxvlan_cfg.strip_tag1_en = false;
9630                 vport->rxvlan_cfg.strip_tag2_en = enable;
9631                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9632         } else {
9633                 vport->rxvlan_cfg.strip_tag1_en = enable;
9634                 vport->rxvlan_cfg.strip_tag2_en = true;
9635                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9636         }
9637
9638         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9639         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9640         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9641         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9642
9643         return hclge_set_vlan_rx_offload_cfg(vport);
9644 }
9645
9646 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9647                                             u16 port_base_vlan_state,
9648                                             struct hclge_vlan_info *new_info,
9649                                             struct hclge_vlan_info *old_info)
9650 {
9651         struct hclge_dev *hdev = vport->back;
9652         int ret;
9653
9654         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9655                 hclge_rm_vport_all_vlan_table(vport, false);
9656                 return hclge_set_vlan_filter_hw(hdev,
9657                                                  htons(new_info->vlan_proto),
9658                                                  vport->vport_id,
9659                                                  new_info->vlan_tag,
9660                                                  false);
9661         }
9662
9663         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9664                                        vport->vport_id, old_info->vlan_tag,
9665                                        true);
9666         if (ret)
9667                 return ret;
9668
9669         return hclge_add_vport_all_vlan_table(vport);
9670 }
9671
9672 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9673                                     struct hclge_vlan_info *vlan_info)
9674 {
9675         struct hnae3_handle *nic = &vport->nic;
9676         struct hclge_vlan_info *old_vlan_info;
9677         struct hclge_dev *hdev = vport->back;
9678         int ret;
9679
9680         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9681
9682         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9683         if (ret)
9684                 return ret;
9685
9686         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9687                 /* add new VLAN tag */
9688                 ret = hclge_set_vlan_filter_hw(hdev,
9689                                                htons(vlan_info->vlan_proto),
9690                                                vport->vport_id,
9691                                                vlan_info->vlan_tag,
9692                                                false);
9693                 if (ret)
9694                         return ret;
9695
9696                 /* remove old VLAN tag */
9697                 ret = hclge_set_vlan_filter_hw(hdev,
9698                                                htons(old_vlan_info->vlan_proto),
9699                                                vport->vport_id,
9700                                                old_vlan_info->vlan_tag,
9701                                                true);
9702                 if (ret)
9703                         return ret;
9704
9705                 goto update;
9706         }
9707
9708         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9709                                                old_vlan_info);
9710         if (ret)
9711                 return ret;
9712
9713         /* update state only when disabling/enabling port based VLAN */
9714         vport->port_base_vlan_cfg.state = state;
9715         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9716                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9717         else
9718                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9719
9720 update:
9721         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9722         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9723         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9724
9725         return 0;
9726 }
9727
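/* Work out how the port based VLAN configuration of a vport changes for the
 * requested VLAN tag: enable, disable or modify it, or leave it unchanged.
 */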
9728 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9729                                           enum hnae3_port_base_vlan_state state,
9730                                           u16 vlan)
9731 {
9732         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9733                 if (!vlan)
9734                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9735                 else
9736                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9737         } else {
9738                 if (!vlan)
9739                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9740                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9741                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9742                 else
9743                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9744         }
9745 }
9746
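/* Handle a host request to set a VF's port based VLAN: validate the VLAN
 * id/qos/protocol, work out the new port based VLAN state, program it into
 * the hardware and, for devices before version V3, push the new state to
 * the VF if it is alive.
 */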
9747 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9748                                     u16 vlan, u8 qos, __be16 proto)
9749 {
9750         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9751         struct hclge_vport *vport = hclge_get_vport(handle);
9752         struct hclge_dev *hdev = vport->back;
9753         struct hclge_vlan_info vlan_info;
9754         u16 state;
9755         int ret;
9756
9757         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9758                 return -EOPNOTSUPP;
9759
9760         vport = hclge_get_vf_vport(hdev, vfid);
9761         if (!vport)
9762                 return -EINVAL;
9763
9764         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9765         if (vlan > VLAN_N_VID - 1 || qos > 7)
9766                 return -EINVAL;
9767         if (proto != htons(ETH_P_8021Q))
9768                 return -EPROTONOSUPPORT;
9769
9770         state = hclge_get_port_base_vlan_state(vport,
9771                                                vport->port_base_vlan_cfg.state,
9772                                                vlan);
9773         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9774                 return 0;
9775
9776         vlan_info.vlan_tag = vlan;
9777         vlan_info.qos = qos;
9778         vlan_info.vlan_proto = ntohs(proto);
9779
9780         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9781         if (ret) {
9782                 dev_err(&hdev->pdev->dev,
9783                         "failed to update port base vlan for vf %d, ret = %d\n",
9784                         vfid, ret);
9785                 return ret;
9786         }
9787
9788         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9789          * VLAN state.
9790          */
9791         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9792             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9793                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9794                                                   vport->vport_id, state,
9795                                                   vlan, qos,
9796                                                   ntohs(proto));
9797
9798         return 0;
9799 }
9800
9801 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9802 {
9803         struct hclge_vlan_info *vlan_info;
9804         struct hclge_vport *vport;
9805         int ret;
9806         int vf;
9807
9808         /* clear port base vlan for all vf */
9809         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9810                 vport = &hdev->vport[vf];
9811                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9812
9813                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9814                                                vport->vport_id,
9815                                                vlan_info->vlan_tag, true);
9816                 if (ret)
9817                         dev_err(&hdev->pdev->dev,
9818                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9819                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9820         }
9821 }
9822
9823 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9824                           u16 vlan_id, bool is_kill)
9825 {
9826         struct hclge_vport *vport = hclge_get_vport(handle);
9827         struct hclge_dev *hdev = vport->back;
9828         bool writen_to_tbl = false;
9829         int ret = 0;
9830
9831         /* When the device is resetting or the reset has failed, the firmware
9832          * is unable to handle the mailbox. Just record the vlan id and
9833          * remove it after the reset has finished.
9834          */
9835         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9836              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9837                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9838                 return -EBUSY;
9839         }
9840
9841         /* When port based vlan is enabled, we use the port based vlan as the
9842          * vlan filter entry. In this case, we don't update the vlan filter
9843          * table when the user adds or removes a vlan; we just update the
9844          * vport vlan list. The vlan ids in that list are not written into
9845          * the vlan filter table until port based vlan is disabled.
9846          */
9847         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9848                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9849                                                vlan_id, is_kill);
9850                 writen_to_tbl = true;
9851         }
9852
9853         if (!ret) {
9854                 if (is_kill)
9855                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9856                 else
9857                         hclge_add_vport_vlan_table(vport, vlan_id,
9858                                                    writen_to_tbl);
9859         } else if (is_kill) {
9860                 /* when removing the hw vlan filter failed, record the vlan
9861                  * id and try to remove it from hw later, to be consistent
9862                  * with the stack
9863                  */
9864                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9865         }
9866         return ret;
9867 }
9868
9869 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9870 {
9871 #define HCLGE_MAX_SYNC_COUNT    60
9872
9873         int i, ret, sync_cnt = 0;
9874         u16 vlan_id;
9875
9876         /* retry the vlan deletions that previously failed, for every vport */
9877         for (i = 0; i < hdev->num_alloc_vport; i++) {
9878                 struct hclge_vport *vport = &hdev->vport[i];
9879
9880                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9881                                          VLAN_N_VID);
9882                 while (vlan_id != VLAN_N_VID) {
9883                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9884                                                        vport->vport_id, vlan_id,
9885                                                        true);
9886                         if (ret && ret != -EINVAL)
9887                                 return;
9888
9889                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9890                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9891
9892                         sync_cnt++;
9893                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9894                                 return;
9895
9896                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9897                                                  VLAN_N_VID);
9898                 }
9899         }
9900 }
9901
9902 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9903 {
9904         struct hclge_config_max_frm_size_cmd *req;
9905         struct hclge_desc desc;
9906
9907         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9908
9909         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9910         req->max_frm_size = cpu_to_le16(new_mps);
9911         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9912
9913         return hclge_cmd_send(&hdev->hw, &desc, 1);
9914 }
9915
9916 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9917 {
9918         struct hclge_vport *vport = hclge_get_vport(handle);
9919
9920         return hclge_set_vport_mtu(vport, new_mtu);
9921 }
9922
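/* Convert the requested MTU into a maximum frame size and apply it. A VF
 * only records its mps (which must not exceed the PF's), while the PF
 * reprograms the MAC and reallocates the packet buffers with the client
 * stopped.
 */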
9923 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9924 {
9925         struct hclge_dev *hdev = vport->back;
9926         int i, max_frm_size, ret;
9927
9928         /* HW supports 2 layers of vlan */
9929         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9930         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9931             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9932                 return -EINVAL;
9933
9934         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9935         mutex_lock(&hdev->vport_lock);
9936         /* VF's mps must fit within hdev->mps */
9937         if (vport->vport_id && max_frm_size > hdev->mps) {
9938                 mutex_unlock(&hdev->vport_lock);
9939                 return -EINVAL;
9940         } else if (vport->vport_id) {
9941                 vport->mps = max_frm_size;
9942                 mutex_unlock(&hdev->vport_lock);
9943                 return 0;
9944         }
9945
9946         /* PF's mps must not be less than any VF's mps */
9947         for (i = 1; i < hdev->num_alloc_vport; i++)
9948                 if (max_frm_size < hdev->vport[i].mps) {
9949                         mutex_unlock(&hdev->vport_lock);
9950                         return -EINVAL;
9951                 }
9952
9953         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9954
9955         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9956         if (ret) {
9957                 dev_err(&hdev->pdev->dev,
9958                         "Change mtu fail, ret =%d\n", ret);
9959                 goto out;
9960         }
9961
9962         hdev->mps = max_frm_size;
9963         vport->mps = max_frm_size;
9964
9965         ret = hclge_buffer_alloc(hdev);
9966         if (ret)
9967                 dev_err(&hdev->pdev->dev,
9968                         "Allocate buffer fail, ret =%d\n", ret);
9969
9970 out:
9971         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9972         mutex_unlock(&hdev->vport_lock);
9973         return ret;
9974 }
9975
9976 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9977                                     bool enable)
9978 {
9979         struct hclge_reset_tqp_queue_cmd *req;
9980         struct hclge_desc desc;
9981         int ret;
9982
9983         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9984
9985         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9986         req->tqp_id = cpu_to_le16(queue_id);
9987         if (enable)
9988                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9989
9990         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9991         if (ret) {
9992                 dev_err(&hdev->pdev->dev,
9993                         "Send tqp reset cmd error, status =%d\n", ret);
9994                 return ret;
9995         }
9996
9997         return 0;
9998 }
9999
10000 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10001 {
10002         struct hclge_reset_tqp_queue_cmd *req;
10003         struct hclge_desc desc;
10004         int ret;
10005
10006         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10007
10008         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10009         req->tqp_id = cpu_to_le16(queue_id);
10010
10011         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10012         if (ret) {
10013                 dev_err(&hdev->pdev->dev,
10014                         "Get reset status error, status =%d\n", ret);
10015                 return ret;
10016         }
10017
10018         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10019 }
10020
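/* Convert a queue id that is local to the handle into the global TQP index
 * used by the hardware.
 */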
10021 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10022 {
10023         struct hnae3_queue *queue;
10024         struct hclge_tqp *tqp;
10025
10026         queue = handle->kinfo.tqp[queue_id];
10027         tqp = container_of(queue, struct hclge_tqp, q);
10028
10029         return tqp->index;
10030 }
10031
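/* Reset a single TQP: disable the queue, request the reset through the
 * firmware, poll the reset status until the hardware reports it, then
 * deassert the reset request again.
 */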
10032 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10033 {
10034         struct hclge_vport *vport = hclge_get_vport(handle);
10035         struct hclge_dev *hdev = vport->back;
10036         int reset_try_times = 0;
10037         int reset_status;
10038         u16 queue_gid;
10039         int ret;
10040
10041         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10042
10043         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10044         if (ret) {
10045                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10046                 return ret;
10047         }
10048
10049         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10050         if (ret) {
10051                 dev_err(&hdev->pdev->dev,
10052                         "Send reset tqp cmd fail, ret = %d\n", ret);
10053                 return ret;
10054         }
10055
10056         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10057                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10058                 if (reset_status)
10059                         break;
10060
10061                 /* Wait for tqp hw reset */
10062                 usleep_range(1000, 1200);
10063         }
10064
10065         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10066                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10067                 return ret;
10068         }
10069
10070         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10071         if (ret)
10072                 dev_err(&hdev->pdev->dev,
10073                         "Deassert the soft reset fail, ret = %d\n", ret);
10074
10075         return ret;
10076 }
10077
10078 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10079 {
10080         struct hnae3_handle *handle = &vport->nic;
10081         struct hclge_dev *hdev = vport->back;
10082         int reset_try_times = 0;
10083         int reset_status;
10084         u16 queue_gid;
10085         int ret;
10086
10087         if (queue_id >= handle->kinfo.num_tqps) {
10088                 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10089                          queue_id);
10090                 return;
10091         }
10092
10093         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10094
10095         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10096         if (ret) {
10097                 dev_warn(&hdev->pdev->dev,
10098                          "Send reset tqp cmd fail, ret = %d\n", ret);
10099                 return;
10100         }
10101
10102         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10103                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10104                 if (reset_status)
10105                         break;
10106
10107                 /* Wait for tqp hw reset */
10108                 usleep_range(1000, 1200);
10109         }
10110
10111         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10112                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10113                 return;
10114         }
10115
10116         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10117         if (ret)
10118                 dev_warn(&hdev->pdev->dev,
10119                          "Deassert the soft reset fail, ret = %d\n", ret);
10120 }
10121
10122 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10123 {
10124         struct hclge_vport *vport = hclge_get_vport(handle);
10125         struct hclge_dev *hdev = vport->back;
10126
10127         return hdev->fw_version;
10128 }
10129
10130 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10131 {
10132         struct phy_device *phydev = hdev->hw.mac.phydev;
10133
10134         if (!phydev)
10135                 return;
10136
10137         phy_set_asym_pause(phydev, rx_en, tx_en);
10138 }
10139
10140 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10141 {
10142         int ret;
10143
10144         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10145                 return 0;
10146
10147         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10148         if (ret)
10149                 dev_err(&hdev->pdev->dev,
10150                         "configure pauseparam error, ret = %d.\n", ret);
10151
10152         return ret;
10153 }
10154
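/* Re-resolve the MAC pause configuration from the PHY autoneg result:
 * combine the local and link partner pause advertisements and program the
 * resolved rx/tx pause settings (none for half duplex) into the MAC.
 */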
10155 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10156 {
10157         struct phy_device *phydev = hdev->hw.mac.phydev;
10158         u16 remote_advertising = 0;
10159         u16 local_advertising;
10160         u32 rx_pause, tx_pause;
10161         u8 flowctl;
10162
10163         if (!phydev->link || !phydev->autoneg)
10164                 return 0;
10165
10166         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10167
10168         if (phydev->pause)
10169                 remote_advertising = LPA_PAUSE_CAP;
10170
10171         if (phydev->asym_pause)
10172                 remote_advertising |= LPA_PAUSE_ASYM;
10173
10174         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10175                                            remote_advertising);
10176         tx_pause = flowctl & FLOW_CTRL_TX;
10177         rx_pause = flowctl & FLOW_CTRL_RX;
10178
10179         if (phydev->duplex == HCLGE_MAC_HALF) {
10180                 tx_pause = 0;
10181                 rx_pause = 0;
10182         }
10183
10184         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10185 }
10186
10187 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10188                                  u32 *rx_en, u32 *tx_en)
10189 {
10190         struct hclge_vport *vport = hclge_get_vport(handle);
10191         struct hclge_dev *hdev = vport->back;
10192         u8 media_type = hdev->hw.mac.media_type;
10193
10194         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10195                     hclge_get_autoneg(handle) : 0;
10196
10197         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10198                 *rx_en = 0;
10199                 *tx_en = 0;
10200                 return;
10201         }
10202
10203         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10204                 *rx_en = 1;
10205                 *tx_en = 0;
10206         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10207                 *tx_en = 1;
10208                 *rx_en = 0;
10209         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10210                 *rx_en = 1;
10211                 *tx_en = 1;
10212         } else {
10213                 *rx_en = 0;
10214                 *tx_en = 0;
10215         }
10216 }
10217
10218 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10219                                          u32 rx_en, u32 tx_en)
10220 {
10221         if (rx_en && tx_en)
10222                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10223         else if (rx_en && !tx_en)
10224                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10225         else if (!rx_en && tx_en)
10226                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10227         else
10228                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10229
10230         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10231 }
10232
10233 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10234                                 u32 rx_en, u32 tx_en)
10235 {
10236         struct hclge_vport *vport = hclge_get_vport(handle);
10237         struct hclge_dev *hdev = vport->back;
10238         struct phy_device *phydev = hdev->hw.mac.phydev;
10239         u32 fc_autoneg;
10240
10241         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10242                 fc_autoneg = hclge_get_autoneg(handle);
10243                 if (auto_neg != fc_autoneg) {
10244                         dev_info(&hdev->pdev->dev,
10245                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10246                         return -EOPNOTSUPP;
10247                 }
10248         }
10249
10250         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10251                 dev_info(&hdev->pdev->dev,
10252                          "Priority flow control enabled. Cannot set link flow control.\n");
10253                 return -EOPNOTSUPP;
10254         }
10255
10256         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10257
10258         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10259
10260         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10261                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10262
10263         if (phydev)
10264                 return phy_start_aneg(phydev);
10265
10266         return -EOPNOTSUPP;
10267 }
10268
10269 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10270                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10271 {
10272         struct hclge_vport *vport = hclge_get_vport(handle);
10273         struct hclge_dev *hdev = vport->back;
10274
10275         if (speed)
10276                 *speed = hdev->hw.mac.speed;
10277         if (duplex)
10278                 *duplex = hdev->hw.mac.duplex;
10279         if (auto_neg)
10280                 *auto_neg = hdev->hw.mac.autoneg;
10281 }
10282
10283 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10284                                  u8 *module_type)
10285 {
10286         struct hclge_vport *vport = hclge_get_vport(handle);
10287         struct hclge_dev *hdev = vport->back;
10288
10289         /* When the nic is down, the service task is not running and does
10290          * not update the port information every second. Query it here to
10291          * ensure the media type returned is correct.
10292          */
10293         hclge_update_port_info(hdev);
10294
10295         if (media_type)
10296                 *media_type = hdev->hw.mac.media_type;
10297
10298         if (module_type)
10299                 *module_type = hdev->hw.mac.module_type;
10300 }
10301
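/* Report the MDI-X control and status of the PHY by reading its MDI-X page
 * registers; handles without a PHY device report ETH_TP_MDI_INVALID.
 */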
10302 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10303                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10304 {
10305         struct hclge_vport *vport = hclge_get_vport(handle);
10306         struct hclge_dev *hdev = vport->back;
10307         struct phy_device *phydev = hdev->hw.mac.phydev;
10308         int mdix_ctrl, mdix, is_resolved;
10309         unsigned int retval;
10310
10311         if (!phydev) {
10312                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10313                 *tp_mdix = ETH_TP_MDI_INVALID;
10314                 return;
10315         }
10316
10317         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10318
10319         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10320         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10321                                     HCLGE_PHY_MDIX_CTRL_S);
10322
10323         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10324         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10325         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10326
10327         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10328
10329         switch (mdix_ctrl) {
10330         case 0x0:
10331                 *tp_mdix_ctrl = ETH_TP_MDI;
10332                 break;
10333         case 0x1:
10334                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10335                 break;
10336         case 0x3:
10337                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10338                 break;
10339         default:
10340                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10341                 break;
10342         }
10343
10344         if (!is_resolved)
10345                 *tp_mdix = ETH_TP_MDI_INVALID;
10346         else if (mdix)
10347                 *tp_mdix = ETH_TP_MDI_X;
10348         else
10349                 *tp_mdix = ETH_TP_MDI;
10350 }
10351
10352 static void hclge_info_show(struct hclge_dev *hdev)
10353 {
10354         struct device *dev = &hdev->pdev->dev;
10355
10356         dev_info(dev, "PF info begin:\n");
10357
10358         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10359         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10360         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10361         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10362         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10363         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10364         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10365         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10366         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10367         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10368         dev_info(dev, "This is %s PF\n",
10369                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10370         dev_info(dev, "DCB %s\n",
10371                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10372         dev_info(dev, "MQPRIO %s\n",
10373                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10374
10375         dev_info(dev, "PF info end.\n");
10376 }
10377
10378 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10379                                           struct hclge_vport *vport)
10380 {
10381         struct hnae3_client *client = vport->nic.client;
10382         struct hclge_dev *hdev = ae_dev->priv;
10383         int rst_cnt = hdev->rst_stats.reset_cnt;
10384         int ret;
10385
10386         ret = client->ops->init_instance(&vport->nic);
10387         if (ret)
10388                 return ret;
10389
10390         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10391         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10392             rst_cnt != hdev->rst_stats.reset_cnt) {
10393                 ret = -EBUSY;
10394                 goto init_nic_err;
10395         }
10396
10397         /* Enable nic hw error interrupts */
10398         ret = hclge_config_nic_hw_error(hdev, true);
10399         if (ret) {
10400                 dev_err(&ae_dev->pdev->dev,
10401                         "fail(%d) to enable hw error interrupts\n", ret);
10402                 goto init_nic_err;
10403         }
10404
10405         hnae3_set_client_init_flag(client, ae_dev, 1);
10406
10407         if (netif_msg_drv(&hdev->vport->nic))
10408                 hclge_info_show(hdev);
10409
10410         return ret;
10411
10412 init_nic_err:
10413         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10414         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10415                 msleep(HCLGE_WAIT_RESET_DONE);
10416
10417         client->ops->uninit_instance(&vport->nic, 0);
10418
10419         return ret;
10420 }
10421
10422 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10423                                            struct hclge_vport *vport)
10424 {
10425         struct hclge_dev *hdev = ae_dev->priv;
10426         struct hnae3_client *client;
10427         int rst_cnt;
10428         int ret;
10429
10430         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10431             !hdev->nic_client)
10432                 return 0;
10433
10434         client = hdev->roce_client;
10435         ret = hclge_init_roce_base_info(vport);
10436         if (ret)
10437                 return ret;
10438
10439         rst_cnt = hdev->rst_stats.reset_cnt;
10440         ret = client->ops->init_instance(&vport->roce);
10441         if (ret)
10442                 return ret;
10443
10444         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10445         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10446             rst_cnt != hdev->rst_stats.reset_cnt) {
10447                 ret = -EBUSY;
10448                 goto init_roce_err;
10449         }
10450
10451         /* Enable roce ras interrupts */
10452         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10453         if (ret) {
10454                 dev_err(&ae_dev->pdev->dev,
10455                         "fail(%d) to enable roce ras interrupts\n", ret);
10456                 goto init_roce_err;
10457         }
10458
10459         hnae3_set_client_init_flag(client, ae_dev, 1);
10460
10461         return 0;
10462
10463 init_roce_err:
10464         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10465         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10466                 msleep(HCLGE_WAIT_RESET_DONE);
10467
10468         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10469
10470         return ret;
10471 }
10472
10473 static int hclge_init_client_instance(struct hnae3_client *client,
10474                                       struct hnae3_ae_dev *ae_dev)
10475 {
10476         struct hclge_dev *hdev = ae_dev->priv;
10477         struct hclge_vport *vport;
10478         int i, ret;
10479
10480         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10481                 vport = &hdev->vport[i];
10482
10483                 switch (client->type) {
10484                 case HNAE3_CLIENT_KNIC:
10485                         hdev->nic_client = client;
10486                         vport->nic.client = client;
10487                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10488                         if (ret)
10489                                 goto clear_nic;
10490
10491                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10492                         if (ret)
10493                                 goto clear_roce;
10494
10495                         break;
10496                 case HNAE3_CLIENT_ROCE:
10497                         if (hnae3_dev_roce_supported(hdev)) {
10498                                 hdev->roce_client = client;
10499                                 vport->roce.client = client;
10500                         }
10501
10502                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10503                         if (ret)
10504                                 goto clear_roce;
10505
10506                         break;
10507                 default:
10508                         return -EINVAL;
10509                 }
10510         }
10511
10512         return 0;
10513
10514 clear_nic:
10515         hdev->nic_client = NULL;
10516         vport->nic.client = NULL;
10517         return ret;
10518 clear_roce:
10519         hdev->roce_client = NULL;
10520         vport->roce.client = NULL;
10521         return ret;
10522 }
10523
10524 static void hclge_uninit_client_instance(struct hnae3_client *client,
10525                                          struct hnae3_ae_dev *ae_dev)
10526 {
10527         struct hclge_dev *hdev = ae_dev->priv;
10528         struct hclge_vport *vport;
10529         int i;
10530
10531         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10532                 vport = &hdev->vport[i];
10533                 if (hdev->roce_client) {
10534                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10535                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10536                                 msleep(HCLGE_WAIT_RESET_DONE);
10537
10538                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10539                                                                 0);
10540                         hdev->roce_client = NULL;
10541                         vport->roce.client = NULL;
10542                 }
10543                 if (client->type == HNAE3_CLIENT_ROCE)
10544                         return;
10545                 if (hdev->nic_client && client->ops->uninit_instance) {
10546                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10547                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10548                                 msleep(HCLGE_WAIT_RESET_DONE);
10549
10550                         client->ops->uninit_instance(&vport->nic, 0);
10551                         hdev->nic_client = NULL;
10552                         vport->nic.client = NULL;
10553                 }
10554         }
10555 }
10556
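/* Map the optional device memory BAR (BAR 4) as write-combined; devices
 * that do not expose this BAR skip the mapping.
 */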
10557 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10558 {
10559 #define HCLGE_MEM_BAR           4
10560
10561         struct pci_dev *pdev = hdev->pdev;
10562         struct hclge_hw *hw = &hdev->hw;
10563
10564         /* if the device does not have device memory, return directly */
10565         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10566                 return 0;
10567
10568         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10569                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10570                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10571         if (!hw->mem_base) {
10572                 dev_err(&pdev->dev, "failed to map device memory\n");
10573                 return -EFAULT;
10574         }
10575
10576         return 0;
10577 }
10578
10579 static int hclge_pci_init(struct hclge_dev *hdev)
10580 {
10581         struct pci_dev *pdev = hdev->pdev;
10582         struct hclge_hw *hw;
10583         int ret;
10584
10585         ret = pci_enable_device(pdev);
10586         if (ret) {
10587                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10588                 return ret;
10589         }
10590
10591         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10592         if (ret) {
10593                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10594                 if (ret) {
10595                         dev_err(&pdev->dev,
10596                                 "can't set consistent PCI DMA");
10597                         goto err_disable_device;
10598                 }
10599                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10600         }
10601
10602         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10603         if (ret) {
10604                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10605                 goto err_disable_device;
10606         }
10607
10608         pci_set_master(pdev);
10609         hw = &hdev->hw;
10610         hw->io_base = pcim_iomap(pdev, 2, 0);
10611         if (!hw->io_base) {
10612                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10613                 ret = -ENOMEM;
10614                 goto err_clr_master;
10615         }
10616
10617         ret = hclge_dev_mem_map(hdev);
10618         if (ret)
10619                 goto err_unmap_io_base;
10620
10621         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10622
10623         return 0;
10624
10625 err_unmap_io_base:
10626         pcim_iounmap(pdev, hdev->hw.io_base);
10627 err_clr_master:
10628         pci_clear_master(pdev);
10629         pci_release_regions(pdev);
10630 err_disable_device:
10631         pci_disable_device(pdev);
10632
10633         return ret;
10634 }
10635
10636 static void hclge_pci_uninit(struct hclge_dev *hdev)
10637 {
10638         struct pci_dev *pdev = hdev->pdev;
10639
10640         if (hdev->hw.mem_base)
10641                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10642
10643         pcim_iounmap(pdev, hdev->hw.io_base);
10644         pci_free_irq_vectors(pdev);
10645         pci_clear_master(pdev);
10646         pci_release_mem_regions(pdev);
10647         pci_disable_device(pdev);
10648 }
10649
10650 static void hclge_state_init(struct hclge_dev *hdev)
10651 {
10652         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10653         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10654         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10655         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10656         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10657         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10658         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10659 }
10660
10661 static void hclge_state_uninit(struct hclge_dev *hdev)
10662 {
10663         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10664         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10665
10666         if (hdev->reset_timer.function)
10667                 del_timer_sync(&hdev->reset_timer);
10668         if (hdev->service_task.work.func)
10669                 cancel_delayed_work_sync(&hdev->service_task);
10670 }
10671
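/* Prepare the function for an FLR: take the reset semaphore and run the
 * reset preparation, retrying a few times if it fails or another reset is
 * pending, then mask the misc vector and disable the command queue until
 * the FLR is done.
 */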
10672 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10673 {
10674 #define HCLGE_FLR_RETRY_WAIT_MS 500
10675 #define HCLGE_FLR_RETRY_CNT     5
10676
10677         struct hclge_dev *hdev = ae_dev->priv;
10678         int retry_cnt = 0;
10679         int ret;
10680
10681 retry:
10682         down(&hdev->reset_sem);
10683         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10684         hdev->reset_type = HNAE3_FLR_RESET;
10685         ret = hclge_reset_prepare(hdev);
10686         if (ret || hdev->reset_pending) {
10687                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10688                         ret);
10689                 if (hdev->reset_pending ||
10690                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10691                         dev_err(&hdev->pdev->dev,
10692                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10693                                 hdev->reset_pending, retry_cnt);
10694                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10695                         up(&hdev->reset_sem);
10696                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10697                         goto retry;
10698                 }
10699         }
10700
10701         /* disable misc vector before FLR done */
10702         hclge_enable_vector(&hdev->misc_vector, false);
10703         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10704         hdev->rst_stats.flr_rst_cnt++;
10705 }
10706
10707 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10708 {
10709         struct hclge_dev *hdev = ae_dev->priv;
10710         int ret;
10711
10712         hclge_enable_vector(&hdev->misc_vector, true);
10713
10714         ret = hclge_reset_rebuild(hdev);
10715         if (ret)
10716                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10717
10718         hdev->reset_type = HNAE3_NONE_RESET;
10719         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10720         up(&hdev->reset_sem);
10721 }
10722
10723 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10724 {
10725         u16 i;
10726
10727         for (i = 0; i < hdev->num_alloc_vport; i++) {
10728                 struct hclge_vport *vport = &hdev->vport[i];
10729                 int ret;
10730
10731                  /* Send cmd to clear VF's FUNC_RST_ING */
10732                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10733                 if (ret)
10734                         dev_warn(&hdev->pdev->dev,
10735                                  "clear vf(%u) rst failed %d!\n",
10736                                  vport->vport_id, ret);
10737         }
10738 }
10739
10740 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10741 {
10742         struct pci_dev *pdev = ae_dev->pdev;
10743         struct hclge_dev *hdev;
10744         int ret;
10745
10746         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10747         if (!hdev)
10748                 return -ENOMEM;
10749
10750         hdev->pdev = pdev;
10751         hdev->ae_dev = ae_dev;
10752         hdev->reset_type = HNAE3_NONE_RESET;
10753         hdev->reset_level = HNAE3_FUNC_RESET;
10754         ae_dev->priv = hdev;
10755
10756         /* HW supports 2 layers of vlan */
10757         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10758
10759         mutex_init(&hdev->vport_lock);
10760         spin_lock_init(&hdev->fd_rule_lock);
10761         sema_init(&hdev->reset_sem, 1);
10762
10763         ret = hclge_pci_init(hdev);
10764         if (ret)
10765                 goto out;
10766
10767         /* Firmware command queue initialize */
10768         ret = hclge_cmd_queue_init(hdev);
10769         if (ret)
10770                 goto err_pci_uninit;
10771
10772         /* Firmware command initialize */
10773         ret = hclge_cmd_init(hdev);
10774         if (ret)
10775                 goto err_cmd_uninit;
10776
10777         ret = hclge_get_cap(hdev);
10778         if (ret)
10779                 goto err_cmd_uninit;
10780
10781         ret = hclge_query_dev_specs(hdev);
10782         if (ret) {
10783                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10784                         ret);
10785                 goto err_cmd_uninit;
10786         }
10787
10788         ret = hclge_configure(hdev);
10789         if (ret) {
10790                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10791                 goto err_cmd_uninit;
10792         }
10793
10794         ret = hclge_init_msi(hdev);
10795         if (ret) {
10796                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10797                 goto err_cmd_uninit;
10798         }
10799
10800         ret = hclge_misc_irq_init(hdev);
10801         if (ret)
10802                 goto err_msi_uninit;
10803
10804         ret = hclge_alloc_tqps(hdev);
10805         if (ret) {
10806                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10807                 goto err_msi_irq_uninit;
10808         }
10809
10810         ret = hclge_alloc_vport(hdev);
10811         if (ret)
10812                 goto err_msi_irq_uninit;
10813
10814         ret = hclge_map_tqp(hdev);
10815         if (ret)
10816                 goto err_msi_irq_uninit;
10817
10818         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10819             !hnae3_dev_phy_imp_supported(hdev)) {
10820                 ret = hclge_mac_mdio_config(hdev);
10821                 if (ret)
10822                         goto err_msi_irq_uninit;
10823         }
10824
10825         ret = hclge_init_umv_space(hdev);
10826         if (ret)
10827                 goto err_mdiobus_unreg;
10828
10829         ret = hclge_mac_init(hdev);
10830         if (ret) {
10831                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10832                 goto err_mdiobus_unreg;
10833         }
10834
10835         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10836         if (ret) {
10837                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10838                 goto err_mdiobus_unreg;
10839         }
10840
10841         ret = hclge_config_gro(hdev, true);
10842         if (ret)
10843                 goto err_mdiobus_unreg;
10844
10845         ret = hclge_init_vlan_config(hdev);
10846         if (ret) {
10847                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10848                 goto err_mdiobus_unreg;
10849         }
10850
10851         ret = hclge_tm_schd_init(hdev);
10852         if (ret) {
10853                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10854                 goto err_mdiobus_unreg;
10855         }
10856
10857         ret = hclge_rss_init_cfg(hdev);
10858         if (ret) {
10859                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10860                 goto err_mdiobus_unreg;
10861         }
10862
10863         ret = hclge_rss_init_hw(hdev);
10864         if (ret) {
10865                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10866                 goto err_mdiobus_unreg;
10867         }
10868
10869         ret = init_mgr_tbl(hdev);
10870         if (ret) {
10871                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10872                 goto err_mdiobus_unreg;
10873         }
10874
10875         ret = hclge_init_fd_config(hdev);
10876         if (ret) {
10877                 dev_err(&pdev->dev,
10878                         "fd table init fail, ret=%d\n", ret);
10879                 goto err_mdiobus_unreg;
10880         }
10881
10882         INIT_KFIFO(hdev->mac_tnl_log);
10883
10884         hclge_dcb_ops_set(hdev);
10885
10886         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10887         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10888
10889         /* Setup affinity after service timer setup because add_timer_on
10890          * is called in affinity notify.
10891          */
10892         hclge_misc_affinity_setup(hdev);
10893
10894         hclge_clear_all_event_cause(hdev);
10895         hclge_clear_resetting_state(hdev);
10896
10897         /* Log and clear the hw errors that have already occurred */
10898         hclge_handle_all_hns_hw_errors(ae_dev);
10899
10900         /* request a delayed reset for error recovery, because an immediate
10901          * global reset on a PF would affect other PFs' pending initialization
10902          */
10903         if (ae_dev->hw_err_reset_req) {
10904                 enum hnae3_reset_type reset_level;
10905
10906                 reset_level = hclge_get_reset_level(ae_dev,
10907                                                     &ae_dev->hw_err_reset_req);
10908                 hclge_set_def_reset_request(ae_dev, reset_level);
10909                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10910         }
10911
10912         /* Enable MISC vector(vector0) */
10913         hclge_enable_vector(&hdev->misc_vector, true);
10914
10915         hclge_state_init(hdev);
10916         hdev->last_reset_time = jiffies;
10917
10918         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10919                  HCLGE_DRIVER_NAME);
10920
10921         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10922
10923         return 0;
10924
10925 err_mdiobus_unreg:
10926         if (hdev->hw.mac.phydev)
10927                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10928 err_msi_irq_uninit:
10929         hclge_misc_irq_uninit(hdev);
10930 err_msi_uninit:
10931         pci_free_irq_vectors(pdev);
10932 err_cmd_uninit:
10933         hclge_cmd_uninit(hdev);
10934 err_pci_uninit:
10935         pcim_iounmap(pdev, hdev->hw.io_base);
10936         pci_clear_master(pdev);
10937         pci_release_regions(pdev);
10938         pci_disable_device(pdev);
10939 out:
10940         mutex_destroy(&hdev->vport_lock);
10941         return ret;
10942 }
10943
10944 static void hclge_stats_clear(struct hclge_dev *hdev)
10945 {
10946         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10947 }
10948
10949 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10950 {
10951         return hclge_config_switch_param(hdev, vf, enable,
10952                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10953 }
10954
10955 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10956 {
10957         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10958                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10959                                           enable, vf);
10960 }
10961
10962 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10963 {
10964         int ret;
10965
10966         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10967         if (ret) {
10968                 dev_err(&hdev->pdev->dev,
10969                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10970                         vf, enable ? "on" : "off", ret);
10971                 return ret;
10972         }
10973
10974         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10975         if (ret)
10976                 dev_err(&hdev->pdev->dev,
10977                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10978                         vf, enable ? "on" : "off", ret);
10979
10980         return ret;
10981 }
10982
10983 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10984                                  bool enable)
10985 {
10986         struct hclge_vport *vport = hclge_get_vport(handle);
10987         struct hclge_dev *hdev = vport->back;
10988         u32 new_spoofchk = enable ? 1 : 0;
10989         int ret;
10990
10991         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10992                 return -EOPNOTSUPP;
10993
10994         vport = hclge_get_vf_vport(hdev, vf);
10995         if (!vport)
10996                 return -EINVAL;
10997
10998         if (vport->vf_info.spoofchk == new_spoofchk)
10999                 return 0;
11000
11001         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11002                 dev_warn(&hdev->pdev->dev,
11003                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11004                          vf);
11005         else if (enable && hclge_is_umv_space_full(vport, true))
11006                 dev_warn(&hdev->pdev->dev,
11007                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11008                          vf);
11009
11010         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11011         if (ret)
11012                 return ret;
11013
11014         vport->vf_info.spoofchk = new_spoofchk;
11015         return 0;
11016 }
11017
11018 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11019 {
11020         struct hclge_vport *vport = hdev->vport;
11021         int ret;
11022         int i;
11023
11024         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11025                 return 0;
11026
11027         /* resume the vf spoof check state after reset */
11028         for (i = 0; i < hdev->num_alloc_vport; i++) {
11029                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11030                                                vport->vf_info.spoofchk);
11031                 if (ret)
11032                         return ret;
11033
11034                 vport++;
11035         }
11036
11037         return 0;
11038 }
11039
11040 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11041 {
11042         struct hclge_vport *vport = hclge_get_vport(handle);
11043         struct hclge_dev *hdev = vport->back;
11044         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11045         u32 new_trusted = enable ? 1 : 0;
11046         bool en_bc_pmc;
11047         int ret;
11048
11049         vport = hclge_get_vf_vport(hdev, vf);
11050         if (!vport)
11051                 return -EINVAL;
11052
11053         if (vport->vf_info.trusted == new_trusted)
11054                 return 0;
11055
11056         /* Disable promisc mode for VF if it is not trusted any more. */
11057         if (!enable && vport->vf_info.promisc_enable) {
11058                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11059                 ret = hclge_set_vport_promisc_mode(vport, false, false,
11060                                                    en_bc_pmc);
11061                 if (ret)
11062                         return ret;
11063                 vport->vf_info.promisc_enable = 0;
11064                 hclge_inform_vf_promisc_info(vport);
11065         }
11066
11067         vport->vf_info.trusted = new_trusted;
11068
11069         return 0;
11070 }
11071
11072 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11073 {
11074         int ret;
11075         int vf;
11076
11077         /* reset vf rate to default value */
11078         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11079                 struct hclge_vport *vport = &hdev->vport[vf];
11080
11081                 vport->vf_info.max_tx_rate = 0;
11082                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11083                 if (ret)
11084                         dev_err(&hdev->pdev->dev,
11085                                 "vf%d failed to reset to default, ret=%d\n",
11086                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11087         }
11088 }
11089
11090 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11091                                      int min_tx_rate, int max_tx_rate)
11092 {
11093         if (min_tx_rate != 0 ||
11094             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11095                 dev_err(&hdev->pdev->dev,
11096                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11097                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11098                 return -EINVAL;
11099         }
11100
11101         return 0;
11102 }
11103
11104 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11105                              int min_tx_rate, int max_tx_rate, bool force)
11106 {
11107         struct hclge_vport *vport = hclge_get_vport(handle);
11108         struct hclge_dev *hdev = vport->back;
11109         int ret;
11110
11111         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11112         if (ret)
11113                 return ret;
11114
11115         vport = hclge_get_vf_vport(hdev, vf);
11116         if (!vport)
11117                 return -EINVAL;
11118
11119         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11120                 return 0;
11121
11122         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11123         if (ret)
11124                 return ret;
11125
11126         vport->vf_info.max_tx_rate = max_tx_rate;
11127
11128         return 0;
11129 }
11130
11131 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11132 {
11133         struct hnae3_handle *handle = &hdev->vport->nic;
11134         struct hclge_vport *vport;
11135         int ret;
11136         int vf;
11137
11138         /* resume the vf max_tx_rate after reset */
11139         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11140                 vport = hclge_get_vf_vport(hdev, vf);
11141                 if (!vport)
11142                         return -EINVAL;
11143
11144                 /* zero means max rate; after reset the firmware has already
11145                  * set it to max rate, so just continue.
11146                  */
11147                 if (!vport->vf_info.max_tx_rate)
11148                         continue;
11149
11150                 ret = hclge_set_vf_rate(handle, vf, 0,
11151                                         vport->vf_info.max_tx_rate, true);
11152                 if (ret) {
11153                         dev_err(&hdev->pdev->dev,
11154                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11155                                 vf, vport->vf_info.max_tx_rate, ret);
11156                         return ret;
11157                 }
11158         }
11159
11160         return 0;
11161 }
11162
11163 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11164 {
11165         struct hclge_vport *vport = hdev->vport;
11166         int i;
11167
11168         for (i = 0; i < hdev->num_alloc_vport; i++) {
11169                 hclge_vport_stop(vport);
11170                 vport++;
11171         }
11172 }
11173
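/* Reinitialize the hardware after a reset: rebuild the command queue, TQP
 * mapping, MAC, VLAN, TM, RSS, manager and flow-director tables, re-enable
 * the hardware error interrupts and restore the per-vport state.
 */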
11174 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11175 {
11176         struct hclge_dev *hdev = ae_dev->priv;
11177         struct pci_dev *pdev = ae_dev->pdev;
11178         int ret;
11179
11180         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11181
11182         hclge_stats_clear(hdev);
11183         /* NOTE: a PF reset does not need to clear or restore the PF and VF
11184          * table entries, so the tables in memory should not be cleaned here.
11185          */
11186         if (hdev->reset_type == HNAE3_IMP_RESET ||
11187             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11188                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11189                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11190                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11191                 hclge_reset_umv_space(hdev);
11192         }
11193
11194         ret = hclge_cmd_init(hdev);
11195         if (ret) {
11196                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11197                 return ret;
11198         }
11199
11200         ret = hclge_map_tqp(hdev);
11201         if (ret) {
11202                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11203                 return ret;
11204         }
11205
11206         ret = hclge_mac_init(hdev);
11207         if (ret) {
11208                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11209                 return ret;
11210         }
11211
11212         ret = hclge_tp_port_init(hdev);
11213         if (ret) {
11214                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11215                         ret);
11216                 return ret;
11217         }
11218
11219         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11220         if (ret) {
11221                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11222                 return ret;
11223         }
11224
11225         ret = hclge_config_gro(hdev, true);
11226         if (ret)
11227                 return ret;
11228
11229         ret = hclge_init_vlan_config(hdev);
11230         if (ret) {
11231                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11232                 return ret;
11233         }
11234
11235         ret = hclge_tm_init_hw(hdev, true);
11236         if (ret) {
11237                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11238                 return ret;
11239         }
11240
11241         ret = hclge_rss_init_hw(hdev);
11242         if (ret) {
11243                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11244                 return ret;
11245         }
11246
11247         ret = init_mgr_tbl(hdev);
11248         if (ret) {
11249                 dev_err(&pdev->dev,
11250                         "failed to reinit manager table, ret = %d\n", ret);
11251                 return ret;
11252         }
11253
11254         ret = hclge_init_fd_config(hdev);
11255         if (ret) {
11256                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11257                 return ret;
11258         }
11259
11260         /* Log and clear the hw errors that have already occurred */
11261         hclge_handle_all_hns_hw_errors(ae_dev);
11262
11263         /* Re-enable the hw error interrupts because
11264          * the interrupts get disabled on global reset.
11265          */
11266         ret = hclge_config_nic_hw_error(hdev, true);
11267         if (ret) {
11268                 dev_err(&pdev->dev,
11269                         "fail(%d) to re-enable NIC hw error interrupts\n",
11270                         ret);
11271                 return ret;
11272         }
11273
11274         if (hdev->roce_client) {
11275                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11276                 if (ret) {
11277                         dev_err(&pdev->dev,
11278                                 "fail(%d) to re-enable roce ras interrupts\n",
11279                                 ret);
11280                         return ret;
11281                 }
11282         }
11283
11284         hclge_reset_vport_state(hdev);
11285         ret = hclge_reset_vport_spoofchk(hdev);
11286         if (ret)
11287                 return ret;
11288
11289         ret = hclge_resume_vf_rate(hdev);
11290         if (ret)
11291                 return ret;
11292
11293         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11294                  HCLGE_DRIVER_NAME);
11295
11296         return 0;
11297 }
11298
11299 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11300 {
11301         struct hclge_dev *hdev = ae_dev->priv;
11302         struct hclge_mac *mac = &hdev->hw.mac;
11303
11304         hclge_reset_vf_rate(hdev);
11305         hclge_clear_vf_vlan(hdev);
11306         hclge_misc_affinity_teardown(hdev);
11307         hclge_state_uninit(hdev);
11308         hclge_uninit_mac_table(hdev);
11309
11310         if (mac->phydev)
11311                 mdiobus_unregister(mac->mdio_bus);
11312
11313         /* Disable MISC vector(vector0) */
11314         hclge_enable_vector(&hdev->misc_vector, false);
11315         synchronize_irq(hdev->misc_vector.vector_irq);
11316
11317         /* Disable all hw interrupts */
11318         hclge_config_mac_tnl_int(hdev, false);
11319         hclge_config_nic_hw_error(hdev, false);
11320         hclge_config_rocee_ras_interrupt(hdev, false);
11321
11322         hclge_cmd_uninit(hdev);
11323         hclge_misc_irq_uninit(hdev);
11324         hclge_pci_uninit(hdev);
11325         mutex_destroy(&hdev->vport_lock);
11326         hclge_uninit_vport_vlan_table(hdev);
11327         ae_dev->priv = NULL;
11328 }
11329
11330 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11331 {
11332         struct hclge_vport *vport = hclge_get_vport(handle);
11333         struct hclge_dev *hdev = vport->back;
11334
11335         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11336 }
11337
11338 static void hclge_get_channels(struct hnae3_handle *handle,
11339                                struct ethtool_channels *ch)
11340 {
11341         ch->max_combined = hclge_get_max_channels(handle);
11342         ch->other_count = 1;
11343         ch->max_other = 1;
11344         ch->combined_count = handle->kinfo.rss_size;
11345 }
11346
11347 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11348                                         u16 *alloc_tqps, u16 *max_rss_size)
11349 {
11350         struct hclge_vport *vport = hclge_get_vport(handle);
11351         struct hclge_dev *hdev = vport->back;
11352
11353         *alloc_tqps = vport->alloc_tqps;
11354         *max_rss_size = hdev->pf_rss_size_max;
11355 }
11356
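/* Change the number of combined channels: update the TM vport mapping for
 * the requested queue count, reprogram the RSS TC mode and, unless the user
 * has configured it via ethtool, rebuild the RSS indirection table to cover
 * the new rss_size.
 */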
11357 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11358                               bool rxfh_configured)
11359 {
11360         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11361         struct hclge_vport *vport = hclge_get_vport(handle);
11362         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11363         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11364         struct hclge_dev *hdev = vport->back;
11365         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11366         u16 cur_rss_size = kinfo->rss_size;
11367         u16 cur_tqps = kinfo->num_tqps;
11368         u16 tc_valid[HCLGE_MAX_TC_NUM];
11369         u16 roundup_size;
11370         u32 *rss_indir;
11371         unsigned int i;
11372         int ret;
11373
11374         kinfo->req_rss_size = new_tqps_num;
11375
11376         ret = hclge_tm_vport_map_update(hdev);
11377         if (ret) {
11378                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11379                 return ret;
11380         }
11381
11382         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11383         roundup_size = ilog2(roundup_size);
11384         /* Set the RSS TC mode according to the new RSS size */
11385         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11386                 tc_valid[i] = 0;
11387
11388                 if (!(hdev->hw_tc_map & BIT(i)))
11389                         continue;
11390
11391                 tc_valid[i] = 1;
11392                 tc_size[i] = roundup_size;
11393                 tc_offset[i] = kinfo->rss_size * i;
11394         }
11395         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11396         if (ret)
11397                 return ret;
11398
11399         /* RSS indirection table has been configured by user */
11400         if (rxfh_configured)
11401                 goto out;
11402
11403         /* Reinitialize the RSS indirection table according to the new RSS size */
11404         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11405                             GFP_KERNEL);
11406         if (!rss_indir)
11407                 return -ENOMEM;
11408
11409         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11410                 rss_indir[i] = i % kinfo->rss_size;
11411
11412         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11413         if (ret)
11414                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11415                         ret);
11416
11417         kfree(rss_indir);
11418
11419 out:
11420         if (!ret)
11421                 dev_info(&hdev->pdev->dev,
11422                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11423                          cur_rss_size, kinfo->rss_size,
11424                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11425
11426         return ret;
11427 }
11428
11429 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11430                               u32 *regs_num_64_bit)
11431 {
11432         struct hclge_desc desc;
11433         u32 total_num;
11434         int ret;
11435
11436         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11438         if (ret) {
11439                 dev_err(&hdev->pdev->dev,
11440                         "Query register number cmd failed, ret = %d.\n", ret);
11441                 return ret;
11442         }
11443
11444         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11445         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11446
11447         total_num = *regs_num_32_bit + *regs_num_64_bit;
11448         if (!total_num)
11449                 return -EINVAL;
11450
11451         return 0;
11452 }
11453
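/* Read @regs_num 32-bit registers through the HCLGE_OPC_QUERY_32_BIT_REG
 * command and copy the returned values into @data.
 */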
11454 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11455                                  void *data)
11456 {
11457 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11458 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11459
11460         struct hclge_desc *desc;
11461         u32 *reg_val = data;
11462         __le32 *desc_data;
11463         int nodata_num;
11464         int cmd_num;
11465         int i, k, n;
11466         int ret;
11467
11468         if (regs_num == 0)
11469                 return 0;
11470
11471         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11472         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11473                                HCLGE_32_BIT_REG_RTN_DATANUM);
11474         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11475         if (!desc)
11476                 return -ENOMEM;
11477
11478         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11479         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11480         if (ret) {
11481                 dev_err(&hdev->pdev->dev,
11482                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11483                 kfree(desc);
11484                 return ret;
11485         }
11486
11487         for (i = 0; i < cmd_num; i++) {
11488                 if (i == 0) {
11489                         desc_data = (__le32 *)(&desc[i].data[0]);
11490                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11491                 } else {
11492                         desc_data = (__le32 *)(&desc[i]);
11493                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11494                 }
11495                 for (k = 0; k < n; k++) {
11496                         *reg_val++ = le32_to_cpu(*desc_data++);
11497
11498                         regs_num--;
11499                         if (!regs_num)
11500                                 break;
11501                 }
11502         }
11503
11504         kfree(desc);
11505         return 0;
11506 }
11507
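/* Read @regs_num 64-bit registers through the HCLGE_OPC_QUERY_64_BIT_REG
 * command and copy the returned values into @data.
 */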
11508 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11509                                  void *data)
11510 {
11511 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11512 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11513
11514         struct hclge_desc *desc;
11515         u64 *reg_val = data;
11516         __le64 *desc_data;
11517         int nodata_len;
11518         int cmd_num;
11519         int i, k, n;
11520         int ret;
11521
11522         if (regs_num == 0)
11523                 return 0;
11524
11525         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11526         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11527                                HCLGE_64_BIT_REG_RTN_DATANUM);
11528         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11529         if (!desc)
11530                 return -ENOMEM;
11531
11532         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11533         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11534         if (ret) {
11535                 dev_err(&hdev->pdev->dev,
11536                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11537                 kfree(desc);
11538                 return ret;
11539         }
11540
11541         for (i = 0; i < cmd_num; i++) {
11542                 if (i == 0) {
11543                         desc_data = (__le64 *)(&desc[i].data[0]);
11544                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11545                 } else {
11546                         desc_data = (__le64 *)(&desc[i]);
11547                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11548                 }
11549                 for (k = 0; k < n; k++) {
11550                         *reg_val++ = le64_to_cpu(*desc_data++);
11551
11552                         regs_num--;
11553                         if (!regs_num)
11554                                 break;
11555                 }
11556         }
11557
11558         kfree(desc);
11559         return 0;
11560 }
11561
11562 #define MAX_SEPARATE_NUM        4
11563 #define SEPARATOR_VALUE         0xFDFCFBFA
11564 #define REG_NUM_PER_LINE        4
11565 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11566 #define REG_SEPARATOR_LINE      1
11567 #define REG_NUM_REMAIN_MASK     3
11568 #define BD_LIST_MAX_NUM         30
11569
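/* Send the HCLGE_OPC_DFX_BD_NUM query; on success @desc (an array of
 * HCLGE_GET_DFX_REG_TYPE_CNT descriptors) holds the BD count required for
 * each DFX register type.
 */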
11570 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11571 {
11572         int i;
11573
11574         /* initialize all command BDs except the last one */
11575         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11576                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11577                                            true);
11578                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11579         }
11580
11581         /* initialize the last command BD */
11582         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11583
11584         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11585 }
11586
11587 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11588                                     int *bd_num_list,
11589                                     u32 type_num)
11590 {
11591         u32 entries_per_desc, desc_index, index, offset, i;
11592         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11593         int ret;
11594
11595         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11596         if (ret) {
11597                 dev_err(&hdev->pdev->dev,
11598                         "Get dfx bd num fail, status is %d.\n", ret);
11599                 return ret;
11600         }
11601
11602         entries_per_desc = ARRAY_SIZE(desc[0].data);
11603         for (i = 0; i < type_num; i++) {
11604                 offset = hclge_dfx_bd_offset_list[i];
11605                 index = offset % entries_per_desc;
11606                 desc_index = offset / entries_per_desc;
11607                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11608         }
11609
11610         return ret;
11611 }
11612
11613 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11614                                   struct hclge_desc *desc_src, int bd_num,
11615                                   enum hclge_opcode_type cmd)
11616 {
11617         struct hclge_desc *desc = desc_src;
11618         int i, ret;
11619
11620         hclge_cmd_setup_basic_desc(desc, cmd, true);
11621         for (i = 0; i < bd_num - 1; i++) {
11622                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11623                 desc++;
11624                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11625         }
11626
11627         desc = desc_src;
11628         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11629         if (ret)
11630                 dev_err(&hdev->pdev->dev,
11631                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11632                         cmd, ret);
11633
11634         return ret;
11635 }
11636
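/* Copy the register values returned in @desc_src into @data, pad the last
 * line with separator values and return the number of u32 words written.
 */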
11637 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11638                                     void *data)
11639 {
11640         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11641         struct hclge_desc *desc = desc_src;
11642         u32 *reg = data;
11643
11644         entries_per_desc = ARRAY_SIZE(desc->data);
11645         reg_num = entries_per_desc * bd_num;
11646         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11647         for (i = 0; i < reg_num; i++) {
11648                 index = i % entries_per_desc;
11649                 desc_index = i / entries_per_desc;
11650                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11651         }
11652         for (i = 0; i < separator_num; i++)
11653                 *reg++ = SEPARATOR_VALUE;
11654
11655         return reg_num + separator_num;
11656 }
11657
11658 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11659 {
11660         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11661         int data_len_per_desc, bd_num, i;
11662         int bd_num_list[BD_LIST_MAX_NUM];
11663         u32 data_len;
11664         int ret;
11665
11666         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11667         if (ret) {
11668                 dev_err(&hdev->pdev->dev,
11669                         "Get dfx reg bd num fail, status is %d.\n", ret);
11670                 return ret;
11671         }
11672
11673         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11674         *len = 0;
11675         for (i = 0; i < dfx_reg_type_num; i++) {
11676                 bd_num = bd_num_list[i];
11677                 data_len = data_len_per_desc * bd_num;
11678                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11679         }
11680
11681         return ret;
11682 }
11683
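/* For each DFX register type, send the matching query command and append
 * the returned values (plus separator padding) to @data.
 */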
11684 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11685 {
11686         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11687         int bd_num, bd_num_max, buf_len, i;
11688         int bd_num_list[BD_LIST_MAX_NUM];
11689         struct hclge_desc *desc_src;
11690         u32 *reg = data;
11691         int ret;
11692
11693         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11694         if (ret) {
11695                 dev_err(&hdev->pdev->dev,
11696                         "Get dfx reg bd num fail, status is %d.\n", ret);
11697                 return ret;
11698         }
11699
11700         bd_num_max = bd_num_list[0];
11701         for (i = 1; i < dfx_reg_type_num; i++)
11702                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11703
11704         buf_len = sizeof(*desc_src) * bd_num_max;
11705         desc_src = kzalloc(buf_len, GFP_KERNEL);
11706         if (!desc_src)
11707                 return -ENOMEM;
11708
11709         for (i = 0; i < dfx_reg_type_num; i++) {
11710                 bd_num = bd_num_list[i];
11711                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11712                                              hclge_dfx_reg_opcode_list[i]);
11713                 if (ret) {
11714                         dev_err(&hdev->pdev->dev,
11715                                 "Get dfx reg fail, status is %d.\n", ret);
11716                         break;
11717                 }
11718
11719                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11720         }
11721
11722         kfree(desc_src);
11723         return ret;
11724 }
11725
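/* Dump the cmdq, common, per-ring and per-vector interrupt registers
 * directly from the PF PCIe register space and return the number of u32
 * words written, separators included.
 */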
11726 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11727                               struct hnae3_knic_private_info *kinfo)
11728 {
11729 #define HCLGE_RING_REG_OFFSET           0x200
11730 #define HCLGE_RING_INT_REG_OFFSET       0x4
11731
11732         int i, j, reg_num, separator_num;
11733         int data_num_sum;
11734         u32 *reg = data;
11735
11736         /* fetch per-PF register values from the PF PCIe register space */
11737         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11738         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11739         for (i = 0; i < reg_num; i++)
11740                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11741         for (i = 0; i < separator_num; i++)
11742                 *reg++ = SEPARATOR_VALUE;
11743         data_num_sum = reg_num + separator_num;
11744
11745         reg_num = ARRAY_SIZE(common_reg_addr_list);
11746         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11747         for (i = 0; i < reg_num; i++)
11748                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11749         for (i = 0; i < separator_num; i++)
11750                 *reg++ = SEPARATOR_VALUE;
11751         data_num_sum += reg_num + separator_num;
11752
11753         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11754         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11755         for (j = 0; j < kinfo->num_tqps; j++) {
11756                 for (i = 0; i < reg_num; i++)
11757                         *reg++ = hclge_read_dev(&hdev->hw,
11758                                                 ring_reg_addr_list[i] +
11759                                                 HCLGE_RING_REG_OFFSET * j);
11760                 for (i = 0; i < separator_num; i++)
11761                         *reg++ = SEPARATOR_VALUE;
11762         }
11763         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11764
11765         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11766         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11767         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11768                 for (i = 0; i < reg_num; i++)
11769                         *reg++ = hclge_read_dev(&hdev->hw,
11770                                                 tqp_intr_reg_addr_list[i] +
11771                                                 HCLGE_RING_INT_REG_OFFSET * j);
11772                 for (i = 0; i < separator_num; i++)
11773                         *reg++ = SEPARATOR_VALUE;
11774         }
11775         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11776
11777         return data_num_sum;
11778 }
11779
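/* Return the buffer size in bytes that hclge_get_regs() will fill: the
 * directly read PF registers, the 32/64-bit command-queried registers and
 * the DFX registers, each padded to whole separator-terminated lines.
 */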
11780 static int hclge_get_regs_len(struct hnae3_handle *handle)
11781 {
11782         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11783         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11784         struct hclge_vport *vport = hclge_get_vport(handle);
11785         struct hclge_dev *hdev = vport->back;
11786         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11787         int regs_lines_32_bit, regs_lines_64_bit;
11788         int ret;
11789
11790         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11791         if (ret) {
11792                 dev_err(&hdev->pdev->dev,
11793                         "Get register number failed, ret = %d.\n", ret);
11794                 return ret;
11795         }
11796
11797         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11798         if (ret) {
11799                 dev_err(&hdev->pdev->dev,
11800                         "Get dfx reg len failed, ret = %d.\n", ret);
11801                 return ret;
11802         }
11803
11804         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11805                 REG_SEPARATOR_LINE;
11806         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11807                 REG_SEPARATOR_LINE;
11808         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11809                 REG_SEPARATOR_LINE;
11810         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11811                 REG_SEPARATOR_LINE;
11812         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11813                 REG_SEPARATOR_LINE;
11814         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11815                 REG_SEPARATOR_LINE;
11816
11817         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11818                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11819                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11820 }
11821
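/* ethtool register dump: fetch the direct PF registers first, then the
 * 32-bit and 64-bit command-queried registers, then the DFX registers,
 * inserting separator padding between the blocks.
 */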
11822 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11823                            void *data)
11824 {
11825         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11826         struct hclge_vport *vport = hclge_get_vport(handle);
11827         struct hclge_dev *hdev = vport->back;
11828         u32 regs_num_32_bit, regs_num_64_bit;
11829         int i, reg_num, separator_num, ret;
11830         u32 *reg = data;
11831
11832         *version = hdev->fw_version;
11833
11834         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11835         if (ret) {
11836                 dev_err(&hdev->pdev->dev,
11837                         "Get register number failed, ret = %d.\n", ret);
11838                 return;
11839         }
11840
11841         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11842
11843         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11844         if (ret) {
11845                 dev_err(&hdev->pdev->dev,
11846                         "Get 32 bit register failed, ret = %d.\n", ret);
11847                 return;
11848         }
11849         reg_num = regs_num_32_bit;
11850         reg += reg_num;
11851         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11852         for (i = 0; i < separator_num; i++)
11853                 *reg++ = SEPARATOR_VALUE;
11854
11855         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11856         if (ret) {
11857                 dev_err(&hdev->pdev->dev,
11858                         "Get 64 bit register failed, ret = %d.\n", ret);
11859                 return;
11860         }
11861         reg_num = regs_num_64_bit * 2;
11862         reg += reg_num;
11863         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11864         for (i = 0; i < separator_num; i++)
11865                 *reg++ = SEPARATOR_VALUE;
11866
11867         ret = hclge_get_dfx_reg(hdev, reg);
11868         if (ret)
11869                 dev_err(&hdev->pdev->dev,
11870                         "Get dfx register failed, ret = %d.\n", ret);
11871 }
11872
11873 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11874 {
11875         struct hclge_set_led_state_cmd *req;
11876         struct hclge_desc desc;
11877         int ret;
11878
11879         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11880
11881         req = (struct hclge_set_led_state_cmd *)desc.data;
11882         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11883                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11884
11885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11886         if (ret)
11887                 dev_err(&hdev->pdev->dev,
11888                         "Send set led state cmd error, ret =%d\n", ret);
11889
11890         return ret;
11891 }
11892
11893 enum hclge_led_status {
11894         HCLGE_LED_OFF,
11895         HCLGE_LED_ON,
11896         HCLGE_LED_NO_CHANGE = 0xFF,
11897 };
11898
11899 static int hclge_set_led_id(struct hnae3_handle *handle,
11900                             enum ethtool_phys_id_state status)
11901 {
11902         struct hclge_vport *vport = hclge_get_vport(handle);
11903         struct hclge_dev *hdev = vport->back;
11904
11905         switch (status) {
11906         case ETHTOOL_ID_ACTIVE:
11907                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11908         case ETHTOOL_ID_INACTIVE:
11909                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11910         default:
11911                 return -EINVAL;
11912         }
11913 }
11914
11915 static void hclge_get_link_mode(struct hnae3_handle *handle,
11916                                 unsigned long *supported,
11917                                 unsigned long *advertising)
11918 {
11919         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11920         struct hclge_vport *vport = hclge_get_vport(handle);
11921         struct hclge_dev *hdev = vport->back;
11922         unsigned int idx = 0;
11923
11924         for (; idx < size; idx++) {
11925                 supported[idx] = hdev->hw.mac.supported[idx];
11926                 advertising[idx] = hdev->hw.mac.advertising[idx];
11927         }
11928 }
11929
11930 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11931 {
11932         struct hclge_vport *vport = hclge_get_vport(handle);
11933         struct hclge_dev *hdev = vport->back;
11934
11935         return hclge_config_gro(hdev, enable);
11936 }
11937
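/* Re-apply the PF promiscuous mode and VLAN filter state when the overflow
 * promiscuous flags or a pending change request indicate that the hardware
 * is out of sync with the requested configuration.
 */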
11938 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11939 {
11940         struct hclge_vport *vport = &hdev->vport[0];
11941         struct hnae3_handle *handle = &vport->nic;
11942         u8 tmp_flags;
11943         int ret;
11944
11945         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11946                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11947                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11948         }
11949
11950         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11951                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11952                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11953                                              tmp_flags & HNAE3_MPE);
11954                 if (!ret) {
11955                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11956                         hclge_enable_vlan_filter(handle,
11957                                                  tmp_flags & HNAE3_VLAN_FLTR);
11958                 }
11959         }
11960 }
11961
11962 static bool hclge_module_existed(struct hclge_dev *hdev)
11963 {
11964         struct hclge_desc desc;
11965         u32 existed;
11966         int ret;
11967
11968         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11969         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11970         if (ret) {
11971                 dev_err(&hdev->pdev->dev,
11972                         "failed to get SFP exist state, ret = %d\n", ret);
11973                 return false;
11974         }
11975
11976         existed = le32_to_cpu(desc.data[0]);
11977
11978         return existed != 0;
11979 }
11980
11981 /* each read needs 6 BDs (140 bytes in total);
11982  * return the number of bytes actually read, 0 means the read failed.
11983  */
11984 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11985                                      u32 len, u8 *data)
11986 {
11987         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11988         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11989         u16 read_len;
11990         u16 copy_len;
11991         int ret;
11992         int i;
11993
11994         /* setup all 6 bds to read module eeprom info. */
11995         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11996                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11997                                            true);
11998
11999                 /* bd0~bd4 need next flag */
12000                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12001                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12002         }
12003
12004         /* setup bd0, this bd contains offset and read length. */
12005         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12006         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12007         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12008         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12009
12010         ret = hclge_cmd_send(&hdev->hw, desc, i);
12011         if (ret) {
12012                 dev_err(&hdev->pdev->dev,
12013                         "failed to get SFP eeprom info, ret = %d\n", ret);
12014                 return 0;
12015         }
12016
12017         /* copy sfp info from bd0 to out buffer. */
12018         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12019         memcpy(data, sfp_info_bd0->data, copy_len);
12020         read_len = copy_len;
12021
12022         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12023         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12024                 if (read_len >= len)
12025                         return read_len;
12026
12027                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12028                 memcpy(data + read_len, desc[i].data, copy_len);
12029                 read_len += copy_len;
12030         }
12031
12032         return read_len;
12033 }
12034
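/* ethtool module EEPROM read: only supported on fibre ports, returns -ENXIO
 * when no module is present and keeps issuing reads until @len bytes have
 * been copied into @data.
 */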
12035 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12036                                    u32 len, u8 *data)
12037 {
12038         struct hclge_vport *vport = hclge_get_vport(handle);
12039         struct hclge_dev *hdev = vport->back;
12040         u32 read_len = 0;
12041         u16 data_len;
12042
12043         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12044                 return -EOPNOTSUPP;
12045
12046         if (!hclge_module_existed(hdev))
12047                 return -ENXIO;
12048
12049         while (read_len < len) {
12050                 data_len = hclge_get_sfp_eeprom_info(hdev,
12051                                                      offset + read_len,
12052                                                      len - read_len,
12053                                                      data + read_len);
12054                 if (!data_len)
12055                         return -EIO;
12056
12057                 read_len += data_len;
12058         }
12059
12060         return 0;
12061 }
12062
12063 static const struct hnae3_ae_ops hclge_ops = {
12064         .init_ae_dev = hclge_init_ae_dev,
12065         .uninit_ae_dev = hclge_uninit_ae_dev,
12066         .flr_prepare = hclge_flr_prepare,
12067         .flr_done = hclge_flr_done,
12068         .init_client_instance = hclge_init_client_instance,
12069         .uninit_client_instance = hclge_uninit_client_instance,
12070         .map_ring_to_vector = hclge_map_ring_to_vector,
12071         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12072         .get_vector = hclge_get_vector,
12073         .put_vector = hclge_put_vector,
12074         .set_promisc_mode = hclge_set_promisc_mode,
12075         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12076         .set_loopback = hclge_set_loopback,
12077         .start = hclge_ae_start,
12078         .stop = hclge_ae_stop,
12079         .client_start = hclge_client_start,
12080         .client_stop = hclge_client_stop,
12081         .get_status = hclge_get_status,
12082         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12083         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12084         .get_media_type = hclge_get_media_type,
12085         .check_port_speed = hclge_check_port_speed,
12086         .get_fec = hclge_get_fec,
12087         .set_fec = hclge_set_fec,
12088         .get_rss_key_size = hclge_get_rss_key_size,
12089         .get_rss = hclge_get_rss,
12090         .set_rss = hclge_set_rss,
12091         .set_rss_tuple = hclge_set_rss_tuple,
12092         .get_rss_tuple = hclge_get_rss_tuple,
12093         .get_tc_size = hclge_get_tc_size,
12094         .get_mac_addr = hclge_get_mac_addr,
12095         .set_mac_addr = hclge_set_mac_addr,
12096         .do_ioctl = hclge_do_ioctl,
12097         .add_uc_addr = hclge_add_uc_addr,
12098         .rm_uc_addr = hclge_rm_uc_addr,
12099         .add_mc_addr = hclge_add_mc_addr,
12100         .rm_mc_addr = hclge_rm_mc_addr,
12101         .set_autoneg = hclge_set_autoneg,
12102         .get_autoneg = hclge_get_autoneg,
12103         .restart_autoneg = hclge_restart_autoneg,
12104         .halt_autoneg = hclge_halt_autoneg,
12105         .get_pauseparam = hclge_get_pauseparam,
12106         .set_pauseparam = hclge_set_pauseparam,
12107         .set_mtu = hclge_set_mtu,
12108         .reset_queue = hclge_reset_tqp,
12109         .get_stats = hclge_get_stats,
12110         .get_mac_stats = hclge_get_mac_stat,
12111         .update_stats = hclge_update_stats,
12112         .get_strings = hclge_get_strings,
12113         .get_sset_count = hclge_get_sset_count,
12114         .get_fw_version = hclge_get_fw_version,
12115         .get_mdix_mode = hclge_get_mdix_mode,
12116         .enable_vlan_filter = hclge_enable_vlan_filter,
12117         .set_vlan_filter = hclge_set_vlan_filter,
12118         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12119         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12120         .reset_event = hclge_reset_event,
12121         .get_reset_level = hclge_get_reset_level,
12122         .set_default_reset_request = hclge_set_def_reset_request,
12123         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12124         .set_channels = hclge_set_channels,
12125         .get_channels = hclge_get_channels,
12126         .get_regs_len = hclge_get_regs_len,
12127         .get_regs = hclge_get_regs,
12128         .set_led_id = hclge_set_led_id,
12129         .get_link_mode = hclge_get_link_mode,
12130         .add_fd_entry = hclge_add_fd_entry,
12131         .del_fd_entry = hclge_del_fd_entry,
12132         .del_all_fd_entries = hclge_del_all_fd_entries,
12133         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12134         .get_fd_rule_info = hclge_get_fd_rule_info,
12135         .get_fd_all_rules = hclge_get_all_rules,
12136         .enable_fd = hclge_enable_fd,
12137         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12138         .dbg_run_cmd = hclge_dbg_run_cmd,
12139         .dbg_read_cmd = hclge_dbg_read_cmd,
12140         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12141         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12142         .ae_dev_resetting = hclge_ae_dev_resetting,
12143         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12144         .set_gro_en = hclge_gro_en,
12145         .get_global_queue_id = hclge_covert_handle_qid_global,
12146         .set_timer_task = hclge_set_timer_task,
12147         .mac_connect_phy = hclge_mac_connect_phy,
12148         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12149         .get_vf_config = hclge_get_vf_config,
12150         .set_vf_link_state = hclge_set_vf_link_state,
12151         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12152         .set_vf_trust = hclge_set_vf_trust,
12153         .set_vf_rate = hclge_set_vf_rate,
12154         .set_vf_mac = hclge_set_vf_mac,
12155         .get_module_eeprom = hclge_get_module_eeprom,
12156         .get_cmdq_stat = hclge_get_cmdq_stat,
12157         .add_cls_flower = hclge_add_cls_flower,
12158         .del_cls_flower = hclge_del_cls_flower,
12159         .cls_flower_active = hclge_is_cls_flower_active,
12160         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12161         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12162 };
12163
12164 static struct hnae3_ae_algo ae_algo = {
12165         .ops = &hclge_ops,
12166         .pdev_id_table = ae_algo_pci_tbl,
12167 };
12168
12169 static int hclge_init(void)
12170 {
12171         pr_info("%s is initializing\n", HCLGE_NAME);
12172
12173         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12174         if (!hclge_wq) {
12175                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12176                 return -ENOMEM;
12177         }
12178
12179         hnae3_register_ae_algo(&ae_algo);
12180
12181         return 0;
12182 }
12183
12184 static void hclge_exit(void)
12185 {
12186         hnae3_unregister_ae_algo(&ae_algo);
12187         destroy_workqueue(hclge_wq);
12188 }
12189 module_init(hclge_init);
12190 module_exit(hclge_exit);
12191
12192 MODULE_LICENSE("GPL");
12193 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12194 MODULE_DESCRIPTION("HCLGE Driver");
12195 MODULE_VERSION(HCLGE_MOD_VERSION);