net: hns3: refactor out hclge_fd_get_tuple()
linux-2.6-microblaze.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73
74 static struct hnae3_ae_algo ae_algo;
75
76 static struct workqueue_struct *hclge_wq;
77
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87         /* required last entry */
88         {0, }
89 };
90
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94                                          HCLGE_CMDQ_TX_ADDR_H_REG,
95                                          HCLGE_CMDQ_TX_DEPTH_REG,
96                                          HCLGE_CMDQ_TX_TAIL_REG,
97                                          HCLGE_CMDQ_TX_HEAD_REG,
98                                          HCLGE_CMDQ_RX_ADDR_L_REG,
99                                          HCLGE_CMDQ_RX_ADDR_H_REG,
100                                          HCLGE_CMDQ_RX_DEPTH_REG,
101                                          HCLGE_CMDQ_RX_TAIL_REG,
102                                          HCLGE_CMDQ_RX_HEAD_REG,
103                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
104                                          HCLGE_CMDQ_INTR_STS_REG,
105                                          HCLGE_CMDQ_INTR_EN_REG,
106                                          HCLGE_CMDQ_INTR_GEN_REG};
107
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109                                            HCLGE_VECTOR0_OTER_EN_REG,
110                                            HCLGE_MISC_RESET_STS_REG,
111                                            HCLGE_MISC_VECTOR_INT_STS,
112                                            HCLGE_GLOBAL_RESET_REG,
113                                            HCLGE_FUN_RST_ING,
114                                            HCLGE_GRO_EN_REG};
115
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117                                          HCLGE_RING_RX_ADDR_H_REG,
118                                          HCLGE_RING_RX_BD_NUM_REG,
119                                          HCLGE_RING_RX_BD_LENGTH_REG,
120                                          HCLGE_RING_RX_MERGE_EN_REG,
121                                          HCLGE_RING_RX_TAIL_REG,
122                                          HCLGE_RING_RX_HEAD_REG,
123                                          HCLGE_RING_RX_FBD_NUM_REG,
124                                          HCLGE_RING_RX_OFFSET_REG,
125                                          HCLGE_RING_RX_FBD_OFFSET_REG,
126                                          HCLGE_RING_RX_STASH_REG,
127                                          HCLGE_RING_RX_BD_ERR_REG,
128                                          HCLGE_RING_TX_ADDR_L_REG,
129                                          HCLGE_RING_TX_ADDR_H_REG,
130                                          HCLGE_RING_TX_BD_NUM_REG,
131                                          HCLGE_RING_TX_PRIORITY_REG,
132                                          HCLGE_RING_TX_TC_REG,
133                                          HCLGE_RING_TX_MERGE_EN_REG,
134                                          HCLGE_RING_TX_TAIL_REG,
135                                          HCLGE_RING_TX_HEAD_REG,
136                                          HCLGE_RING_TX_FBD_NUM_REG,
137                                          HCLGE_RING_TX_OFFSET_REG,
138                                          HCLGE_RING_TX_EBD_NUM_REG,
139                                          HCLGE_RING_TX_EBD_OFFSET_REG,
140                                          HCLGE_RING_TX_BD_ERR_REG,
141                                          HCLGE_RING_EN_REG};
142
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144                                              HCLGE_TQP_INTR_GL0_REG,
145                                              HCLGE_TQP_INTR_GL1_REG,
146                                              HCLGE_TQP_INTR_GL2_REG,
147                                              HCLGE_TQP_INTR_RL_REG};
148
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150         "App    Loopback test",
151         "Serdes serial Loopback test",
152         "Serdes parallel Loopback test",
153         "Phy    Loopback test"
154 };
155
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157         {"mac_tx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159         {"mac_rx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161         {"mac_tx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163         {"mac_rx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165         {"mac_tx_pfc_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167         {"mac_tx_pfc_pri0_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169         {"mac_tx_pfc_pri1_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171         {"mac_tx_pfc_pri2_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173         {"mac_tx_pfc_pri3_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175         {"mac_tx_pfc_pri4_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177         {"mac_tx_pfc_pri5_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179         {"mac_tx_pfc_pri6_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181         {"mac_tx_pfc_pri7_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183         {"mac_rx_pfc_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185         {"mac_rx_pfc_pri0_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187         {"mac_rx_pfc_pri1_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189         {"mac_rx_pfc_pri2_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191         {"mac_rx_pfc_pri3_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193         {"mac_rx_pfc_pri4_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195         {"mac_rx_pfc_pri5_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197         {"mac_rx_pfc_pri6_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199         {"mac_rx_pfc_pri7_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201         {"mac_tx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203         {"mac_tx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205         {"mac_tx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207         {"mac_tx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209         {"mac_tx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211         {"mac_tx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213         {"mac_tx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215         {"mac_tx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217         {"mac_tx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219         {"mac_tx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221         {"mac_tx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223         {"mac_tx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225         {"mac_tx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227         {"mac_tx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229         {"mac_tx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231         {"mac_tx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233         {"mac_tx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235         {"mac_tx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237         {"mac_tx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239         {"mac_tx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241         {"mac_tx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243         {"mac_tx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245         {"mac_tx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247         {"mac_tx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249         {"mac_tx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251         {"mac_rx_total_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253         {"mac_rx_total_oct_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255         {"mac_rx_good_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257         {"mac_rx_bad_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259         {"mac_rx_good_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261         {"mac_rx_bad_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263         {"mac_rx_uni_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265         {"mac_rx_multi_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267         {"mac_rx_broad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269         {"mac_rx_undersize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271         {"mac_rx_oversize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273         {"mac_rx_64_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275         {"mac_rx_65_127_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277         {"mac_rx_128_255_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279         {"mac_rx_256_511_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281         {"mac_rx_512_1023_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283         {"mac_rx_1024_1518_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285         {"mac_rx_1519_2047_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287         {"mac_rx_2048_4095_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289         {"mac_rx_4096_8191_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291         {"mac_rx_8192_9216_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293         {"mac_rx_9217_12287_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295         {"mac_rx_12288_16383_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297         {"mac_rx_1519_max_good_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299         {"mac_rx_1519_max_bad_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301
302         {"mac_tx_fragment_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304         {"mac_tx_undermin_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306         {"mac_tx_jabber_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308         {"mac_tx_err_all_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310         {"mac_tx_from_app_good_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312         {"mac_tx_from_app_bad_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314         {"mac_rx_fragment_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316         {"mac_rx_undermin_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318         {"mac_rx_jabber_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320         {"mac_rx_fcs_err_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322         {"mac_rx_send_app_good_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324         {"mac_rx_send_app_bad_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329         {
330                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
332                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333                 .i_port_bitmap = 0x1,
334         },
335 };
336
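/* Default RSS hash key (the standard 40-byte key widely used for
 * Toeplitz-based RSS).
 */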
337 static const u8 hclge_hash_key[] = {
338         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344
345 static const u32 hclge_dfx_bd_offset_list[] = {
346         HCLGE_DFX_BIOS_BD_OFFSET,
347         HCLGE_DFX_SSU_0_BD_OFFSET,
348         HCLGE_DFX_SSU_1_BD_OFFSET,
349         HCLGE_DFX_IGU_BD_OFFSET,
350         HCLGE_DFX_RPU_0_BD_OFFSET,
351         HCLGE_DFX_RPU_1_BD_OFFSET,
352         HCLGE_DFX_NCSI_BD_OFFSET,
353         HCLGE_DFX_RTC_BD_OFFSET,
354         HCLGE_DFX_PPP_BD_OFFSET,
355         HCLGE_DFX_RCB_BD_OFFSET,
356         HCLGE_DFX_TQP_BD_OFFSET,
357         HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361         HCLGE_OPC_DFX_BIOS_COMMON_REG,
362         HCLGE_OPC_DFX_SSU_REG_0,
363         HCLGE_OPC_DFX_SSU_REG_1,
364         HCLGE_OPC_DFX_IGU_EGU_REG,
365         HCLGE_OPC_DFX_RPU_REG_0,
366         HCLGE_OPC_DFX_RPU_REG_1,
367         HCLGE_OPC_DFX_NCSI_REG,
368         HCLGE_OPC_DFX_RTC_REG,
369         HCLGE_OPC_DFX_PPP_REG,
370         HCLGE_OPC_DFX_RCB_REG,
371         HCLGE_OPC_DFX_TQP_REG,
372         HCLGE_OPC_DFX_SSU_REG_2
373 };
374
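/* Bit widths of the meta data and tuple fields used when building
 * flow director keys.
 */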
375 static const struct key_info meta_data_key_info[] = {
376         { PACKET_TYPE_ID, 6},
377         { IP_FRAGEMENT, 1},
378         { ROCE_TYPE, 1},
379         { NEXT_KEY, 5},
380         { VLAN_NUMBER, 2},
381         { SRC_VPORT, 12},
382         { DST_VPORT, 12},
383         { TUNNEL_PACKET, 1},
384 };
385
386 static const struct key_info tuple_key_info[] = {
387         { OUTER_DST_MAC, 48},
388         { OUTER_SRC_MAC, 48},
389         { OUTER_VLAN_TAG_FST, 16},
390         { OUTER_VLAN_TAG_SEC, 16},
391         { OUTER_ETH_TYPE, 16},
392         { OUTER_L2_RSV, 16},
393         { OUTER_IP_TOS, 8},
394         { OUTER_IP_PROTO, 8},
395         { OUTER_SRC_IP, 32},
396         { OUTER_DST_IP, 32},
397         { OUTER_L3_RSV, 16},
398         { OUTER_SRC_PORT, 16},
399         { OUTER_DST_PORT, 16},
400         { OUTER_L4_RSV, 32},
401         { OUTER_TUN_VNI, 24},
402         { OUTER_TUN_FLOW_ID, 8},
403         { INNER_DST_MAC, 48},
404         { INNER_SRC_MAC, 48},
405         { INNER_VLAN_TAG_FST, 16},
406         { INNER_VLAN_TAG_SEC, 16},
407         { INNER_ETH_TYPE, 16},
408         { INNER_L2_RSV, 16},
409         { INNER_IP_TOS, 8},
410         { INNER_IP_PROTO, 8},
411         { INNER_SRC_IP, 32},
412         { INNER_DST_IP, 32},
413         { INNER_L3_RSV, 16},
414         { INNER_SRC_PORT, 16},
415         { INNER_DST_PORT, 16},
416         { INNER_L4_RSV, 32},
417 };
418
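/* Read the MAC statistics with the legacy fixed-size command: the firmware
 * returns HCLGE_MAC_CMD_NUM descriptors and every 64-bit counter is
 * accumulated into the matching field of hdev->mac_stats.
 */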
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422
423         u64 *data = (u64 *)(&hdev->mac_stats);
424         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425         __le64 *desc_data;
426         int i, k, n;
427         int ret;
428
429         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431         if (ret) {
432                 dev_err(&hdev->pdev->dev,
433                         "Get MAC pkt stats fail, status = %d.\n", ret);
434
435                 return ret;
436         }
437
438         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439                 /* for special opcode 0032, only the first desc has the head */
440                 if (unlikely(i == 0)) {
441                         desc_data = (__le64 *)(&desc[i].data[0]);
442                         n = HCLGE_RD_FIRST_STATS_NUM;
443                 } else {
444                         desc_data = (__le64 *)(&desc[i]);
445                         n = HCLGE_RD_OTHER_STATS_NUM;
446                 }
447
448                 for (k = 0; k < n; k++) {
449                         *data += le64_to_cpu(*desc_data);
450                         data++;
451                         desc_data++;
452                 }
453         }
454
455         return 0;
456 }
457
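/* Read the MAC statistics with the extended command HCLGE_OPC_STATS_MAC_ALL,
 * using a descriptor count previously queried from the firmware, and
 * accumulate every 64-bit counter into hdev->mac_stats.
 */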
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460         u64 *data = (u64 *)(&hdev->mac_stats);
461         struct hclge_desc *desc;
462         __le64 *desc_data;
463         u16 i, k, n;
464         int ret;
465
466         /* This may be called inside atomic sections,
467          * so GFP_ATOMIC is more suitable here
468          */
469         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470         if (!desc)
471                 return -ENOMEM;
472
473         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475         if (ret) {
476                 kfree(desc);
477                 return ret;
478         }
479
480         for (i = 0; i < desc_num; i++) {
481                 /* for special opcode 0034, only the first desc has the head */
482                 if (i == 0) {
483                         desc_data = (__le64 *)(&desc[i].data[0]);
484                         n = HCLGE_RD_FIRST_STATS_NUM;
485                 } else {
486                         desc_data = (__le64 *)(&desc[i]);
487                         n = HCLGE_RD_OTHER_STATS_NUM;
488                 }
489
490                 for (k = 0; k < n; k++) {
491                         *data += le64_to_cpu(*desc_data);
492                         data++;
493                         desc_data++;
494                 }
495         }
496
497         kfree(desc);
498
499         return 0;
500 }
501
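/* Query how many MAC statistics registers the firmware exposes and convert
 * that count into the number of descriptors needed to read them all.
 */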
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504         struct hclge_desc desc;
505         __le32 *desc_data;
506         u32 reg_num;
507         int ret;
508
509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511         if (ret)
512                 return ret;
513
514         desc_data = (__le32 *)(&desc.data[0]);
515         reg_num = le32_to_cpu(*desc_data);
516
517         *desc_num = 1 + ((reg_num - 3) >> 2) +
518                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520         return 0;
521 }
522
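/* Update the MAC statistics, preferring the extended query when the firmware
 * supports it and falling back to the fixed-size command on -EOPNOTSUPP.
 */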
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525         u32 desc_num;
526         int ret;
527
528         ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530         /* The firmware supports the new statistics acquisition method */
531         if (!ret)
532                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533         else if (ret == -EOPNOTSUPP)
534                 ret = hclge_mac_update_stats_defective(hdev);
535         else
536                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538         return ret;
539 }
540
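/* Accumulate the per-queue RX and TX packet counters by issuing one stats
 * query command per TQP in each direction.
 */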
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_vport *vport = hclge_get_vport(handle);
545         struct hclge_dev *hdev = vport->back;
546         struct hnae3_queue *queue;
547         struct hclge_desc desc[1];
548         struct hclge_tqp *tqp;
549         int ret, i;
550
551         for (i = 0; i < kinfo->num_tqps; i++) {
552                 queue = handle->kinfo.tqp[i];
553                 tqp = container_of(queue, struct hclge_tqp, q);
554                 /* command : HCLGE_OPC_QUERY_RX_STATS */
555                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556                                            true);
557
558                 desc[0].data[0] = cpu_to_le32(tqp->index);
559                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560                 if (ret) {
561                         dev_err(&hdev->pdev->dev,
562                                 "Query tqp stat fail, status = %d,queue = %d\n",
563                                 ret, i);
564                         return ret;
565                 }
566                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567                         le32_to_cpu(desc[0].data[1]);
568         }
569
570         for (i = 0; i < kinfo->num_tqps; i++) {
571                 queue = handle->kinfo.tqp[i];
572                 tqp = container_of(queue, struct hclge_tqp, q);
573                 /* command : HCLGE_OPC_QUERY_TX_STATS */
574                 hclge_cmd_setup_basic_desc(&desc[0],
575                                            HCLGE_OPC_QUERY_TX_STATS,
576                                            true);
577
578                 desc[0].data[0] = cpu_to_le32(tqp->index);
579                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580                 if (ret) {
581                         dev_err(&hdev->pdev->dev,
582                                 "Query tqp stat fail, status = %d,queue = %d\n",
583                                 ret, i);
584                         return ret;
585                 }
586                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587                         le32_to_cpu(desc[0].data[1]);
588         }
589
590         return 0;
591 }
592
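/* Copy the accumulated per-queue TX counters, then the RX counters, into the
 * ethtool stats buffer and return the next free position.
 */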
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596         struct hclge_tqp *tqp;
597         u64 *buff = data;
598         int i;
599
600         for (i = 0; i < kinfo->num_tqps; i++) {
601                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603         }
604
605         for (i = 0; i < kinfo->num_tqps; i++) {
606                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608         }
609
610         return buff;
611 }
612
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
617         /* each tqp has a TX queue and an RX queue */
618         return kinfo->num_tqps * 2;
619 }
620
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624         u8 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629                         struct hclge_tqp, q);
630                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
631                          tqp->index);
632                 buff = buff + ETH_GSTRING_LEN;
633         }
634
635         for (i = 0; i < kinfo->num_tqps; i++) {
636                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637                         struct hclge_tqp, q);
638                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
639                          tqp->index);
640                 buff = buff + ETH_GSTRING_LEN;
641         }
642
643         return buff;
644 }
645
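/* Copy "size" counters out of a stats structure using the offsets recorded in
 * the string table and return the next free position in the buffer.
 */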
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647                                  const struct hclge_comm_stats_str strs[],
648                                  int size, u64 *data)
649 {
650         u64 *buf = data;
651         u32 i;
652
653         for (i = 0; i < size; i++)
654                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656         return buf + size;
657 }
658
659 static u8 *hclge_comm_get_strings(u32 stringset,
660                                   const struct hclge_comm_stats_str strs[],
661                                   int size, u8 *data)
662 {
663         char *buff = (char *)data;
664         u32 i;
665
666         if (stringset != ETH_SS_STATS)
667                 return buff;
668
669         for (i = 0; i < size; i++) {
670                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671                 buff = buff + ETH_GSTRING_LEN;
672         }
673
674         return (u8 *)buff;
675 }
676
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679         struct hnae3_handle *handle;
680         int status;
681
682         handle = &hdev->vport[0].nic;
683         if (handle->client) {
684                 status = hclge_tqps_update_stats(handle);
685                 if (status) {
686                         dev_err(&hdev->pdev->dev,
687                                 "Update TQPS stats fail, status = %d.\n",
688                                 status);
689                 }
690         }
691
692         status = hclge_mac_update_stats(hdev);
693         if (status)
694                 dev_err(&hdev->pdev->dev,
695                         "Update MAC stats fail, status = %d.\n", status);
696 }
697
698 static void hclge_update_stats(struct hnae3_handle *handle,
699                                struct net_device_stats *net_stats)
700 {
701         struct hclge_vport *vport = hclge_get_vport(handle);
702         struct hclge_dev *hdev = vport->back;
703         int status;
704
705         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706                 return;
707
708         status = hclge_mac_update_stats(hdev);
709         if (status)
710                 dev_err(&hdev->pdev->dev,
711                         "Update MAC stats fail, status = %d.\n",
712                         status);
713
714         status = hclge_tqps_update_stats(handle);
715         if (status)
716                 dev_err(&hdev->pdev->dev,
717                         "Update TQPS stats fail, status = %d.\n",
718                         status);
719
720         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726                 HNAE3_SUPPORT_PHY_LOOPBACK |\
727                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730         struct hclge_vport *vport = hclge_get_vport(handle);
731         struct hclge_dev *hdev = vport->back;
732         int count = 0;
733
734         /* Loopback test support rules:
735          * mac: only GE mode is supported
736          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
737          * phy: only supported when a phy device exists on the board
738          */
739         if (stringset == ETH_SS_TEST) {
740                 /* clear loopback bit flags first */
741                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746                         count += 1;
747                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748                 }
749
750                 count += 2;
751                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
755                      hdev->hw.mac.phydev->drv->set_loopback) ||
756                     hnae3_dev_phy_imp_supported(hdev)) {
757                         count += 1;
758                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759                 }
760
761         } else if (stringset == ETH_SS_STATS) {
762                 count = ARRAY_SIZE(g_mac_stats_string) +
763                         hclge_tqps_get_sset_count(handle, stringset);
764         }
765
766         return count;
767 }
768
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770                               u8 *data)
771 {
772         u8 *p = data;
773         int size;
774
775         if (stringset == ETH_SS_STATS) {
776                 size = ARRAY_SIZE(g_mac_stats_string);
777                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778                                            size, p);
779                 p = hclge_tqps_get_strings(handle, p);
780         } else if (stringset == ETH_SS_TEST) {
781                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783                                ETH_GSTRING_LEN);
784                         p += ETH_GSTRING_LEN;
785                 }
786                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788                                ETH_GSTRING_LEN);
789                         p += ETH_GSTRING_LEN;
790                 }
791                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792                         memcpy(p,
793                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794                                ETH_GSTRING_LEN);
795                         p += ETH_GSTRING_LEN;
796                 }
797                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799                                ETH_GSTRING_LEN);
800                         p += ETH_GSTRING_LEN;
801                 }
802         }
803 }
804
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807         struct hclge_vport *vport = hclge_get_vport(handle);
808         struct hclge_dev *hdev = vport->back;
809         u64 *p;
810
811         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812                                  ARRAY_SIZE(g_mac_stats_string), data);
813         p = hclge_tqps_get_stats(handle, p);
814 }
815
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817                                struct hns3_mac_stats *mac_stats)
818 {
819         struct hclge_vport *vport = hclge_get_vport(handle);
820         struct hclge_dev *hdev = vport->back;
821
822         hclge_update_stats(handle, NULL);
823
824         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829                                    struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK       0xF
832
833         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834                 return -EINVAL;
835
836         /* Record whether this pf is the main pf */
837         if (status->pf_state & HCLGE_PF_STATE_MAIN)
838                 hdev->flag |= HCLGE_FLAG_MAIN;
839         else
840                 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843         return 0;
844 }
845
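/* Poll the function status from the firmware until the PF state is reported
 * or HCLGE_QUERY_MAX_CNT tries expire, then record whether this PF is the
 * main PF and cache its MAC id.
 */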
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT     5
849
850         struct hclge_func_status_cmd *req;
851         struct hclge_desc desc;
852         int timeout = 0;
853         int ret;
854
855         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856         req = (struct hclge_func_status_cmd *)desc.data;
857
858         do {
859                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860                 if (ret) {
861                         dev_err(&hdev->pdev->dev,
862                                 "query function status failed %d.\n", ret);
863                         return ret;
864                 }
865
866                 /* Check pf reset is done */
867                 if (req->pf_state)
868                         break;
869                 usleep_range(1000, 2000);
870         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871
872         return hclge_parse_func_status(hdev, req);
873 }
874
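/* Query the TQP, packet buffer and MSI-X resources assigned to this PF and
 * cache them in hdev, rounding the TX/DV buffer sizes up to
 * HCLGE_BUF_SIZE_UNIT.
 */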
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877         struct hclge_pf_res_cmd *req;
878         struct hclge_desc desc;
879         int ret;
880
881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883         if (ret) {
884                 dev_err(&hdev->pdev->dev,
885                         "query pf resource failed %d.\n", ret);
886                 return ret;
887         }
888
889         req = (struct hclge_pf_res_cmd *)desc.data;
890         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
891                          le16_to_cpu(req->ext_tqp_num);
892         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
893
894         if (req->tx_buf_size)
895                 hdev->tx_buf_size =
896                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
899
900         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (req->dv_buf_size)
903                 hdev->dv_buf_size =
904                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
905         else
906                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
907
908         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
909
910         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
911         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
912                 dev_err(&hdev->pdev->dev,
913                         "only %u msi resources available, not enough for pf(min:2).\n",
914                         hdev->num_nic_msi);
915                 return -EINVAL;
916         }
917
918         if (hnae3_dev_roce_supported(hdev)) {
919                 hdev->num_roce_msi =
920                         le16_to_cpu(req->pf_intr_vector_number_roce);
921
922                 /* PF should have NIC vectors and RoCE vectors;
923                  * NIC vectors are queued before RoCE vectors.
924                  */
925                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
926         } else {
927                 hdev->num_msi = hdev->num_nic_msi;
928         }
929
930         return 0;
931 }
932
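/* Translate the firmware speed code into a HCLGE_MAC_SPEED_* value. */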
933 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
934 {
935         switch (speed_cmd) {
936         case 6:
937                 *speed = HCLGE_MAC_SPEED_10M;
938                 break;
939         case 7:
940                 *speed = HCLGE_MAC_SPEED_100M;
941                 break;
942         case 0:
943                 *speed = HCLGE_MAC_SPEED_1G;
944                 break;
945         case 1:
946                 *speed = HCLGE_MAC_SPEED_10G;
947                 break;
948         case 2:
949                 *speed = HCLGE_MAC_SPEED_25G;
950                 break;
951         case 3:
952                 *speed = HCLGE_MAC_SPEED_40G;
953                 break;
954         case 4:
955                 *speed = HCLGE_MAC_SPEED_50G;
956                 break;
957         case 5:
958                 *speed = HCLGE_MAC_SPEED_100G;
959                 break;
960         case 8:
961                 *speed = HCLGE_MAC_SPEED_200G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         case HCLGE_MAC_SPEED_200G:
1003                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1004                 break;
1005         default:
1006                 return -EINVAL;
1007         }
1008
1009         if (speed_bit & speed_ability)
1010                 return 0;
1011
1012         return -EINVAL;
1013 }
1014
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1016 {
1017         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028                                  mac->supported);
1029         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031                                  mac->supported);
1032         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1033                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1034                                  mac->supported);
1035 }
1036
1037 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1038 {
1039         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1055                 linkmode_set_bit(
1056                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1057                         mac->supported);
1058 }
1059
1060 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1061 {
1062         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1063                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1064                                  mac->supported);
1065         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1066                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1067                                  mac->supported);
1068         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1069                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1070                                  mac->supported);
1071         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1079                                  mac->supported);
1080 }
1081
1082 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1083 {
1084         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1085                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1086                                  mac->supported);
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
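/* Derive the FEC ability from the current MAC speed: BASER for 10G/40G,
 * BASER and RS for 25G/50G, RS only for 100G/200G, plus AUTO in each case.
 */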
1107 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1108 {
1109         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1110         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1111
1112         switch (mac->speed) {
1113         case HCLGE_MAC_SPEED_10G:
1114         case HCLGE_MAC_SPEED_40G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1116                                  mac->supported);
1117                 mac->fec_ability =
1118                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1119                 break;
1120         case HCLGE_MAC_SPEED_25G:
1121         case HCLGE_MAC_SPEED_50G:
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1123                                  mac->supported);
1124                 mac->fec_ability =
1125                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1126                         BIT(HNAE3_FEC_AUTO);
1127                 break;
1128         case HCLGE_MAC_SPEED_100G:
1129         case HCLGE_MAC_SPEED_200G:
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1131                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1132                 break;
1133         default:
1134                 mac->fec_ability = 0;
1135                 break;
1136         }
1137 }
1138
1139 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1140                                         u16 speed_ability)
1141 {
1142         struct hclge_mac *mac = &hdev->hw.mac;
1143
1144         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1145                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1146                                  mac->supported);
1147
1148         hclge_convert_setting_sr(mac, speed_ability);
1149         hclge_convert_setting_lr(mac, speed_ability);
1150         hclge_convert_setting_cr(mac, speed_ability);
1151         if (hnae3_dev_fec_supported(hdev))
1152                 hclge_convert_setting_fec(mac);
1153
1154         if (hnae3_dev_pause_supported(hdev))
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1156
1157         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1158         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1159 }
1160
1161 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1162                                             u16 speed_ability)
1163 {
1164         struct hclge_mac *mac = &hdev->hw.mac;
1165
1166         hclge_convert_setting_kr(mac, speed_ability);
1167         if (hnae3_dev_fec_supported(hdev))
1168                 hclge_convert_setting_fec(mac);
1169
1170         if (hnae3_dev_pause_supported(hdev))
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1172
1173         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1174         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1175 }
1176
1177 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1178                                          u16 speed_ability)
1179 {
1180         unsigned long *supported = hdev->hw.mac.supported;
1181
1182         /* default to support all speeds for GE port */
1183         if (!speed_ability)
1184                 speed_ability = HCLGE_SUPPORT_GE;
1185
1186         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1187                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1188                                  supported);
1189
1190         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1191                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1192                                  supported);
1193                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1194                                  supported);
1195         }
1196
1197         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1198                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1199                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1200         }
1201
1202         if (hnae3_dev_pause_supported(hdev)) {
1203                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1204                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1205         }
1206
1207         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1208         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1209 }
1210
1211 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1212 {
1213         u8 media_type = hdev->hw.mac.media_type;
1214
1215         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1216                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1217         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1218                 hclge_parse_copper_link_mode(hdev, speed_ability);
1219         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1220                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1221 }
1222
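/* Return the highest speed advertised in the speed ability bitmap, defaulting
 * to 1G when no known bit is set.
 */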
1223 static u32 hclge_get_max_speed(u16 speed_ability)
1224 {
1225         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1226                 return HCLGE_MAC_SPEED_200G;
1227
1228         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1229                 return HCLGE_MAC_SPEED_100G;
1230
1231         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1232                 return HCLGE_MAC_SPEED_50G;
1233
1234         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1235                 return HCLGE_MAC_SPEED_40G;
1236
1237         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1238                 return HCLGE_MAC_SPEED_25G;
1239
1240         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1241                 return HCLGE_MAC_SPEED_10G;
1242
1243         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1244                 return HCLGE_MAC_SPEED_1G;
1245
1246         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1247                 return HCLGE_MAC_SPEED_100M;
1248
1249         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1250                 return HCLGE_MAC_SPEED_10M;
1251
1252         return HCLGE_MAC_SPEED_1G;
1253 }
1254
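/* Unpack the configuration read from flash (two descriptors) into struct
 * hclge_cfg: queue and TC numbers, MAC address, speed ability, RSS sizes and
 * so on.
 */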
1255 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1256 {
1257 #define SPEED_ABILITY_EXT_SHIFT                 8
1258
1259         struct hclge_cfg_param_cmd *req;
1260         u64 mac_addr_tmp_high;
1261         u16 speed_ability_ext;
1262         u64 mac_addr_tmp;
1263         unsigned int i;
1264
1265         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1266
1267         /* get the configuration */
1268         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1269                                               HCLGE_CFG_VMDQ_M,
1270                                               HCLGE_CFG_VMDQ_S);
1271         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1272                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1273         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1274                                             HCLGE_CFG_TQP_DESC_N_M,
1275                                             HCLGE_CFG_TQP_DESC_N_S);
1276
1277         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1278                                         HCLGE_CFG_PHY_ADDR_M,
1279                                         HCLGE_CFG_PHY_ADDR_S);
1280         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1281                                           HCLGE_CFG_MEDIA_TP_M,
1282                                           HCLGE_CFG_MEDIA_TP_S);
1283         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1284                                           HCLGE_CFG_RX_BUF_LEN_M,
1285                                           HCLGE_CFG_RX_BUF_LEN_S);
1286         /* get mac_address */
1287         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1288         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289                                             HCLGE_CFG_MAC_ADDR_H_M,
1290                                             HCLGE_CFG_MAC_ADDR_H_S);
1291
1292         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1293
1294         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1295                                              HCLGE_CFG_DEFAULT_SPEED_M,
1296                                              HCLGE_CFG_DEFAULT_SPEED_S);
1297         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1298                                                HCLGE_CFG_RSS_SIZE_M,
1299                                                HCLGE_CFG_RSS_SIZE_S);
1300
1301         for (i = 0; i < ETH_ALEN; i++)
1302                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1303
1304         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1305         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1306
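        /* the speed ability bitmap is split across two fields; merge the
         * base bits and the extended bits into a single value
         */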
1307         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308                                              HCLGE_CFG_SPEED_ABILITY_M,
1309                                              HCLGE_CFG_SPEED_ABILITY_S);
1310         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1311                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1312                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1313         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1314
1315         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1316                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1317                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1318         if (!cfg->umv_space)
1319                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1320
1321         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1322                                                HCLGE_CFG_PF_RSS_SIZE_M,
1323                                                HCLGE_CFG_PF_RSS_SIZE_S);
1324
1325         /* HCLGE_CFG_PF_RSS_SIZE_M holds the log2 of the PF max rss size
1326          * rather than the size itself, which leaves room for future
1327          * changes and expansions.
1328          * A value of 0 in this field is not meaningful; in that case the
1329          * PF falls back to the VF max rss size read from
1330          * HCLGE_CFG_RSS_SIZE_S, so PF and VF share the same limit.
1331          */
1332         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1333                                1U << cfg->pf_rss_size_max :
1334                                cfg->vf_rss_size_max;
1335 }
1336
1337 /* hclge_get_cfg: query the static parameters from flash
1338  * @hdev: pointer to struct hclge_dev
1339  * @hcfg: the config structure to be filled
1340  */
1341 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1342 {
1343         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1344         struct hclge_cfg_param_cmd *req;
1345         unsigned int i;
1346         int ret;
1347
1348         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1349                 u32 offset = 0;
1350
1351                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1352                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1353                                            true);
1354                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1355                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1356                 /* the read length sent to hardware is in units of 4 bytes */
1357                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1358                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1359                 req->offset = cpu_to_le32(offset);
1360         }
1361
1362         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1363         if (ret) {
1364                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1365                 return ret;
1366         }
1367
1368         hclge_parse_cfg(hcfg, desc);
1369
1370         return 0;
1371 }
1372
1373 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1374 {
1375 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1376
1377         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1378
1379         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1380         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1381         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1382         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1383         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1384         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1385         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1386 }
1387
1388 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1389                                   struct hclge_desc *desc)
1390 {
1391         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1392         struct hclge_dev_specs_0_cmd *req0;
1393         struct hclge_dev_specs_1_cmd *req1;
1394
1395         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1396         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1397
1398         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1399         ae_dev->dev_specs.rss_ind_tbl_size =
1400                 le16_to_cpu(req0->rss_ind_tbl_size);
1401         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1402         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1403         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1404         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1405         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1406         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1407 }
1408
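/* Firmware may report some specification fields as zero; replace any
 * zero field with the driver default so later logic can rely on them.
 */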
1409 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1410 {
1411         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1412
1413         if (!dev_specs->max_non_tso_bd_num)
1414                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1415         if (!dev_specs->rss_ind_tbl_size)
1416                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417         if (!dev_specs->rss_key_size)
1418                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1419         if (!dev_specs->max_tm_rate)
1420                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1421         if (!dev_specs->max_qset_num)
1422                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1423         if (!dev_specs->max_int_gl)
1424                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1425         if (!dev_specs->max_frm_size)
1426                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1427 }
1428
1429 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1430 {
1431         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1432         int ret;
1433         int i;
1434
1435         /* set default specifications as devices lower than version V3 do not
1436          * support querying specifications from firmware.
1437          */
1438         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1439                 hclge_set_default_dev_specs(hdev);
1440                 return 0;
1441         }
1442
1443         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1444                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1445                                            true);
1446                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1447         }
1448         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1449
1450         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1451         if (ret)
1452                 return ret;
1453
1454         hclge_parse_dev_specs(hdev, desc);
1455         hclge_check_dev_specs(hdev);
1456
1457         return 0;
1458 }
1459
1460 static int hclge_get_cap(struct hclge_dev *hdev)
1461 {
1462         int ret;
1463
1464         ret = hclge_query_function_status(hdev);
1465         if (ret) {
1466                 dev_err(&hdev->pdev->dev,
1467                         "query function status error %d.\n", ret);
1468                 return ret;
1469         }
1470
1471         /* get pf resource */
1472         return hclge_query_pf_resource(hdev);
1473 }
1474
1475 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1476 {
1477 #define HCLGE_MIN_TX_DESC       64
1478 #define HCLGE_MIN_RX_DESC       64
1479
1480         if (!is_kdump_kernel())
1481                 return;
1482
1483         dev_info(&hdev->pdev->dev,
1484                  "Running kdump kernel. Using minimal resources\n");
1485
1486         /* the minimum number of queue pairs equals the number of vports */
1487         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1488         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1489         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1490 }
1491
1492 static int hclge_configure(struct hclge_dev *hdev)
1493 {
1494         struct hclge_cfg cfg;
1495         unsigned int i;
1496         int ret;
1497
1498         ret = hclge_get_cfg(hdev, &cfg);
1499         if (ret)
1500                 return ret;
1501
1502         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1503         hdev->base_tqp_pid = 0;
1504         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1505         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1506         hdev->rx_buf_len = cfg.rx_buf_len;
1507         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1508         hdev->hw.mac.media_type = cfg.media_type;
1509         hdev->hw.mac.phy_addr = cfg.phy_addr;
1510         hdev->num_tx_desc = cfg.tqp_desc_num;
1511         hdev->num_rx_desc = cfg.tqp_desc_num;
1512         hdev->tm_info.num_pg = 1;
1513         hdev->tc_max = cfg.tc_num;
1514         hdev->tm_info.hw_pfc_map = 0;
1515         hdev->wanted_umv_size = cfg.umv_space;
1516
1517         if (hnae3_dev_fd_supported(hdev)) {
1518                 hdev->fd_en = true;
1519                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1520         }
1521
1522         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1523         if (ret) {
1524                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1525                         cfg.default_speed, ret);
1526                 return ret;
1527         }
1528
1529         hclge_parse_link_mode(hdev, cfg.speed_ability);
1530
1531         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1532
1533         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1534             (hdev->tc_max < 1)) {
1535                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1536                          hdev->tc_max);
1537                 hdev->tc_max = 1;
1538         }
1539
1540         /* Dev does not support DCB */
1541         if (!hnae3_dev_dcb_supported(hdev)) {
1542                 hdev->tc_max = 1;
1543                 hdev->pfc_max = 0;
1544         } else {
1545                 hdev->pfc_max = hdev->tc_max;
1546         }
1547
1548         hdev->tm_info.num_tc = 1;
1549
1550         /* non-contiguous TCs are currently not supported */
1551         for (i = 0; i < hdev->tm_info.num_tc; i++)
1552                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1553
1554         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1555
1556         hclge_init_kdump_kernel_config(hdev);
1557
1558         /* Set the initial affinity based on the PCI function number */
1559         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1560         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1561         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1562                         &hdev->affinity_mask);
1563
1564         return ret;
1565 }
1566
1567 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1568                             u16 tso_mss_max)
1569 {
1570         struct hclge_cfg_tso_status_cmd *req;
1571         struct hclge_desc desc;
1572
1573         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1574
1575         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1576         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1577         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1578
1579         return hclge_cmd_send(&hdev->hw, &desc, 1);
1580 }
1581
1582 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1583 {
1584         struct hclge_cfg_gro_status_cmd *req;
1585         struct hclge_desc desc;
1586         int ret;
1587
1588         if (!hnae3_dev_gro_supported(hdev))
1589                 return 0;
1590
1591         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1592         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1593
1594         req->gro_en = en ? 1 : 0;
1595
1596         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1597         if (ret)
1598                 dev_err(&hdev->pdev->dev,
1599                         "GRO hardware config cmd failed, ret = %d\n", ret);
1600
1601         return ret;
1602 }
1603
1604 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1605 {
1606         struct hclge_tqp *tqp;
1607         int i;
1608
1609         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1610                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1611         if (!hdev->htqp)
1612                 return -ENOMEM;
1613
1614         tqp = hdev->htqp;
1615
1616         for (i = 0; i < hdev->num_tqps; i++) {
1617                 tqp->dev = &hdev->pdev->dev;
1618                 tqp->index = i;
1619
1620                 tqp->q.ae_algo = &ae_algo;
1621                 tqp->q.buf_size = hdev->rx_buf_len;
1622                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1623                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1624
1625                 /* need an extended offset to configure queues >=
1626                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1627                  */
1628                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1629                         tqp->q.io_base = hdev->hw.io_base +
1630                                          HCLGE_TQP_REG_OFFSET +
1631                                          i * HCLGE_TQP_REG_SIZE;
1632                 else
1633                         tqp->q.io_base = hdev->hw.io_base +
1634                                          HCLGE_TQP_REG_OFFSET +
1635                                          HCLGE_TQP_EXT_REG_OFFSET +
1636                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1637                                          HCLGE_TQP_REG_SIZE;
1638
1639                 tqp++;
1640         }
1641
1642         return 0;
1643 }
1644
1645 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1646                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1647 {
1648         struct hclge_tqp_map_cmd *req;
1649         struct hclge_desc desc;
1650         int ret;
1651
1652         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1653
1654         req = (struct hclge_tqp_map_cmd *)desc.data;
1655         req->tqp_id = cpu_to_le16(tqp_pid);
1656         req->tqp_vf = func_id;
1657         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1658         if (!is_pf)
1659                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1660         req->tqp_vid = cpu_to_le16(tqp_vid);
1661
1662         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1663         if (ret)
1664                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1665
1666         return ret;
1667 }
1668
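/* hclge_assign_tqp: take num_tqps unused TQPs from the global pool for
 * this vport and derive its rss_size, capped by the PF max rss size,
 * the per-TC share of the assigned TQPs and the available NIC MSI
 * vectors (one interrupt per queue).
 */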
1669 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1670 {
1671         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1672         struct hclge_dev *hdev = vport->back;
1673         int i, alloced;
1674
1675         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1676              alloced < num_tqps; i++) {
1677                 if (!hdev->htqp[i].alloced) {
1678                         hdev->htqp[i].q.handle = &vport->nic;
1679                         hdev->htqp[i].q.tqp_index = alloced;
1680                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1681                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1682                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1683                         hdev->htqp[i].alloced = true;
1684                         alloced++;
1685                 }
1686         }
1687         vport->alloc_tqps = alloced;
1688         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1689                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1690
1691         /* ensure a one-to-one mapping between irq and queue by default */
1692         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1693                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1694
1695         return 0;
1696 }
1697
1698 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1699                             u16 num_tx_desc, u16 num_rx_desc)
1700
1701 {
1702         struct hnae3_handle *nic = &vport->nic;
1703         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1704         struct hclge_dev *hdev = vport->back;
1705         int ret;
1706
1707         kinfo->num_tx_desc = num_tx_desc;
1708         kinfo->num_rx_desc = num_rx_desc;
1709
1710         kinfo->rx_buf_len = hdev->rx_buf_len;
1711
1712         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1713                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1714         if (!kinfo->tqp)
1715                 return -ENOMEM;
1716
1717         ret = hclge_assign_tqp(vport, num_tqps);
1718         if (ret)
1719                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1720
1721         return ret;
1722 }
1723
1724 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1725                                   struct hclge_vport *vport)
1726 {
1727         struct hnae3_handle *nic = &vport->nic;
1728         struct hnae3_knic_private_info *kinfo;
1729         u16 i;
1730
1731         kinfo = &nic->kinfo;
1732         for (i = 0; i < vport->alloc_tqps; i++) {
1733                 struct hclge_tqp *q =
1734                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1735                 bool is_pf;
1736                 int ret;
1737
1738                 is_pf = !(vport->vport_id);
1739                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1740                                              i, is_pf);
1741                 if (ret)
1742                         return ret;
1743         }
1744
1745         return 0;
1746 }
1747
1748 static int hclge_map_tqp(struct hclge_dev *hdev)
1749 {
1750         struct hclge_vport *vport = hdev->vport;
1751         u16 i, num_vport;
1752
1753         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1754         for (i = 0; i < num_vport; i++) {
1755                 int ret;
1756
1757                 ret = hclge_map_tqp_to_vport(hdev, vport);
1758                 if (ret)
1759                         return ret;
1760
1761                 vport++;
1762         }
1763
1764         return 0;
1765 }
1766
1767 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1768 {
1769         struct hnae3_handle *nic = &vport->nic;
1770         struct hclge_dev *hdev = vport->back;
1771         int ret;
1772
1773         nic->pdev = hdev->pdev;
1774         nic->ae_algo = &ae_algo;
1775         nic->numa_node_mask = hdev->numa_node_mask;
1776
1777         ret = hclge_knic_setup(vport, num_tqps,
1778                                hdev->num_tx_desc, hdev->num_rx_desc);
1779         if (ret)
1780                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1781
1782         return ret;
1783 }
1784
1785 static int hclge_alloc_vport(struct hclge_dev *hdev)
1786 {
1787         struct pci_dev *pdev = hdev->pdev;
1788         struct hclge_vport *vport;
1789         u32 tqp_main_vport;
1790         u32 tqp_per_vport;
1791         int num_vport, i;
1792         int ret;
1793
1794         /* We need to alloc a vport for the main NIC of the PF */
1795         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1796
1797         if (hdev->num_tqps < num_vport) {
1798                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1799                         hdev->num_tqps, num_vport);
1800                 return -EINVAL;
1801         }
1802
1803         /* Alloc the same number of TQPs for every vport */
1804         tqp_per_vport = hdev->num_tqps / num_vport;
1805         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1806
1807         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1808                              GFP_KERNEL);
1809         if (!vport)
1810                 return -ENOMEM;
1811
1812         hdev->vport = vport;
1813         hdev->num_alloc_vport = num_vport;
1814
1815         if (IS_ENABLED(CONFIG_PCI_IOV))
1816                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1817
1818         for (i = 0; i < num_vport; i++) {
1819                 vport->back = hdev;
1820                 vport->vport_id = i;
1821                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1822                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1823                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1824                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1825                 INIT_LIST_HEAD(&vport->vlan_list);
1826                 INIT_LIST_HEAD(&vport->uc_mac_list);
1827                 INIT_LIST_HEAD(&vport->mc_mac_list);
1828                 spin_lock_init(&vport->mac_list_lock);
1829
1830                 if (i == 0)
1831                         ret = hclge_vport_setup(vport, tqp_main_vport);
1832                 else
1833                         ret = hclge_vport_setup(vport, tqp_per_vport);
1834                 if (ret) {
1835                         dev_err(&pdev->dev,
1836                                 "vport setup failed for vport %d, %d\n",
1837                                 i, ret);
1838                         return ret;
1839                 }
1840
1841                 vport++;
1842         }
1843
1844         return 0;
1845 }
1846
1847 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1848                                     struct hclge_pkt_buf_alloc *buf_alloc)
1849 {
1850 /* TX buffer size is in units of 128 bytes */
1851 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1852 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1853         struct hclge_tx_buff_alloc_cmd *req;
1854         struct hclge_desc desc;
1855         int ret;
1856         u8 i;
1857
1858         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1859
1860         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1861         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1862                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1863
1864                 req->tx_pkt_buff[i] =
1865                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1866                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1867         }
1868
1869         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1870         if (ret)
1871                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1872                         ret);
1873
1874         return ret;
1875 }
1876
1877 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1878                                  struct hclge_pkt_buf_alloc *buf_alloc)
1879 {
1880         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1881
1882         if (ret)
1883                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1884
1885         return ret;
1886 }
1887
1888 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1889 {
1890         unsigned int i;
1891         u32 cnt = 0;
1892
1893         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1894                 if (hdev->hw_tc_map & BIT(i))
1895                         cnt++;
1896         return cnt;
1897 }
1898
1899 /* Get the number of PFC-enabled TCs that have a private buffer */
1900 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1901                                   struct hclge_pkt_buf_alloc *buf_alloc)
1902 {
1903         struct hclge_priv_buf *priv;
1904         unsigned int i;
1905         int cnt = 0;
1906
1907         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1908                 priv = &buf_alloc->priv_buf[i];
1909                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1910                     priv->enable)
1911                         cnt++;
1912         }
1913
1914         return cnt;
1915 }
1916
1917 /* Get the number of PFC-disabled TCs that have a private buffer */
1918 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1919                                      struct hclge_pkt_buf_alloc *buf_alloc)
1920 {
1921         struct hclge_priv_buf *priv;
1922         unsigned int i;
1923         int cnt = 0;
1924
1925         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1926                 priv = &buf_alloc->priv_buf[i];
1927                 if (hdev->hw_tc_map & BIT(i) &&
1928                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1929                     priv->enable)
1930                         cnt++;
1931         }
1932
1933         return cnt;
1934 }
1935
1936 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1937 {
1938         struct hclge_priv_buf *priv;
1939         u32 rx_priv = 0;
1940         int i;
1941
1942         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1943                 priv = &buf_alloc->priv_buf[i];
1944                 if (priv->enable)
1945                         rx_priv += priv->buf_size;
1946         }
1947         return rx_priv;
1948 }
1949
1950 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1951 {
1952         u32 i, total_tx_size = 0;
1953
1954         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1955                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1956
1957         return total_tx_size;
1958 }
1959
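/* hclge_is_rx_buf_ok: check whether rx_all (the rx packet buffer left
 * after tx allocation) can hold both the per-TC private buffers and a
 * shared buffer of at least the required minimum size.  On success the
 * shared buffer size, its high/low waterlines and the per-TC thresholds
 * are filled into buf_alloc->s_buf.
 */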
1960 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1961                                 struct hclge_pkt_buf_alloc *buf_alloc,
1962                                 u32 rx_all)
1963 {
1964         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1965         u32 tc_num = hclge_get_tc_num(hdev);
1966         u32 shared_buf, aligned_mps;
1967         u32 rx_priv;
1968         int i;
1969
1970         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1971
1972         if (hnae3_dev_dcb_supported(hdev))
1973                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1974                                         hdev->dv_buf_size;
1975         else
1976                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1977                                         + hdev->dv_buf_size;
1978
1979         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1980         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1981                              HCLGE_BUF_SIZE_UNIT);
1982
1983         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1984         if (rx_all < rx_priv + shared_std)
1985                 return false;
1986
1987         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1988         buf_alloc->s_buf.buf_size = shared_buf;
1989         if (hnae3_dev_dcb_supported(hdev)) {
1990                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1991                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1992                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1993                                   HCLGE_BUF_SIZE_UNIT);
1994         } else {
1995                 buf_alloc->s_buf.self.high = aligned_mps +
1996                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1997                 buf_alloc->s_buf.self.low = aligned_mps;
1998         }
1999
2000         if (hnae3_dev_dcb_supported(hdev)) {
2001                 hi_thrd = shared_buf - hdev->dv_buf_size;
2002
2003                 if (tc_num <= NEED_RESERVE_TC_NUM)
2004                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2005                                         / BUF_MAX_PERCENT;
2006
2007                 if (tc_num)
2008                         hi_thrd = hi_thrd / tc_num;
2009
2010                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2011                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2012                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2013         } else {
2014                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2015                 lo_thrd = aligned_mps;
2016         }
2017
2018         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2019                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2020                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2021         }
2022
2023         return true;
2024 }
2025
2026 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2027                                 struct hclge_pkt_buf_alloc *buf_alloc)
2028 {
2029         u32 i, total_size;
2030
2031         total_size = hdev->pkt_buf_size;
2032
2033         /* alloc tx buffer for each enabled TC */
2034         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2035                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2036
2037                 if (hdev->hw_tc_map & BIT(i)) {
2038                         if (total_size < hdev->tx_buf_size)
2039                                 return -ENOMEM;
2040
2041                         priv->tx_buf_size = hdev->tx_buf_size;
2042                 } else {
2043                         priv->tx_buf_size = 0;
2044                 }
2045
2046                 total_size -= priv->tx_buf_size;
2047         }
2048
2049         return 0;
2050 }
2051
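/* hclge_rx_buf_calc_all: give every enabled TC a private rx buffer sized
 * from the MPS (larger waterlines when @max is true, minimal ones
 * otherwise) and check whether the remaining space still fits the
 * shared buffer.
 */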
2052 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2053                                   struct hclge_pkt_buf_alloc *buf_alloc)
2054 {
2055         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2056         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2057         unsigned int i;
2058
2059         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2060                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2061
2062                 priv->enable = 0;
2063                 priv->wl.low = 0;
2064                 priv->wl.high = 0;
2065                 priv->buf_size = 0;
2066
2067                 if (!(hdev->hw_tc_map & BIT(i)))
2068                         continue;
2069
2070                 priv->enable = 1;
2071
2072                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2073                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2074                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2075                                                 HCLGE_BUF_SIZE_UNIT);
2076                 } else {
2077                         priv->wl.low = 0;
2078                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2079                                         aligned_mps;
2080                 }
2081
2082                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2083         }
2084
2085         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2086 }
2087
2088 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2089                                           struct hclge_pkt_buf_alloc *buf_alloc)
2090 {
2091         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2092         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2093         int i;
2094
2095         /* clear private buffers starting from the last TC */
2096         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2097                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2098                 unsigned int mask = BIT((unsigned int)i);
2099
2100                 if (hdev->hw_tc_map & mask &&
2101                     !(hdev->tm_info.hw_pfc_map & mask)) {
2102                         /* Clear the no pfc TC private buffer */
2103                         priv->wl.low = 0;
2104                         priv->wl.high = 0;
2105                         priv->buf_size = 0;
2106                         priv->enable = 0;
2107                         no_pfc_priv_num--;
2108                 }
2109
2110                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2111                     no_pfc_priv_num == 0)
2112                         break;
2113         }
2114
2115         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2116 }
2117
2118 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2119                                         struct hclge_pkt_buf_alloc *buf_alloc)
2120 {
2121         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2122         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2123         int i;
2124
2125         /* clear private buffers starting from the last TC */
2126         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2127                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2128                 unsigned int mask = BIT((unsigned int)i);
2129
2130                 if (hdev->hw_tc_map & mask &&
2131                     hdev->tm_info.hw_pfc_map & mask) {
2132                         /* Reduce the number of pfc TC with private buffer */
2133                         priv->wl.low = 0;
2134                         priv->enable = 0;
2135                         priv->wl.high = 0;
2136                         priv->buf_size = 0;
2137                         pfc_priv_num--;
2138                 }
2139
2140                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2141                     pfc_priv_num == 0)
2142                         break;
2143         }
2144
2145         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2146 }
2147
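/* hclge_only_alloc_priv_buff: spend the whole remaining rx packet buffer
 * on per-TC private buffers and leave no shared buffer.  This only
 * succeeds when every enabled TC can get at least min_rx_priv bytes.
 */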
2148 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2149                                       struct hclge_pkt_buf_alloc *buf_alloc)
2150 {
2151 #define COMPENSATE_BUFFER       0x3C00
2152 #define COMPENSATE_HALF_MPS_NUM 5
2153 #define PRIV_WL_GAP             0x1800
2154
2155         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2156         u32 tc_num = hclge_get_tc_num(hdev);
2157         u32 half_mps = hdev->mps >> 1;
2158         u32 min_rx_priv;
2159         unsigned int i;
2160
2161         if (tc_num)
2162                 rx_priv = rx_priv / tc_num;
2163
2164         if (tc_num <= NEED_RESERVE_TC_NUM)
2165                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2166
2167         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2168                         COMPENSATE_HALF_MPS_NUM * half_mps;
2169         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2170         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2171
2172         if (rx_priv < min_rx_priv)
2173                 return false;
2174
2175         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2176                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2177
2178                 priv->enable = 0;
2179                 priv->wl.low = 0;
2180                 priv->wl.high = 0;
2181                 priv->buf_size = 0;
2182
2183                 if (!(hdev->hw_tc_map & BIT(i)))
2184                         continue;
2185
2186                 priv->enable = 1;
2187                 priv->buf_size = rx_priv;
2188                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2189                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2190         }
2191
2192         buf_alloc->s_buf.buf_size = 0;
2193
2194         return true;
2195 }
2196
2197 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2198  * @hdev: pointer to struct hclge_dev
2199  * @buf_alloc: pointer to buffer calculation data
2200  * @return: 0: calculation successful, negative: fail
2201  */
2202 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2203                                 struct hclge_pkt_buf_alloc *buf_alloc)
2204 {
2205         /* When DCB is not supported, rx private buffer is not allocated. */
2206         if (!hnae3_dev_dcb_supported(hdev)) {
2207                 u32 rx_all = hdev->pkt_buf_size;
2208
2209                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2210                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2211                         return -ENOMEM;
2212
2213                 return 0;
2214         }
2215
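        /* Try the allocation strategies from most to least generous:
         * private buffers only, private plus shared buffer with large
         * then minimal waterlines, and finally dropping the private
         * buffers of non-PFC and then PFC TCs until everything fits.
         */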
2216         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2217                 return 0;
2218
2219         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2220                 return 0;
2221
2222         /* try to decrease the buffer size */
2223         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2224                 return 0;
2225
2226         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2227                 return 0;
2228
2229         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2230                 return 0;
2231
2232         return -ENOMEM;
2233 }
2234
2235 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2236                                    struct hclge_pkt_buf_alloc *buf_alloc)
2237 {
2238         struct hclge_rx_priv_buff_cmd *req;
2239         struct hclge_desc desc;
2240         int ret;
2241         int i;
2242
2243         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2244         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2245
2246         /* Alloc a private buffer for each TC */
2247         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2248                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2249
2250                 req->buf_num[i] =
2251                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2252                 req->buf_num[i] |=
2253                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2254         }
2255
2256         req->shared_buf =
2257                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2258                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2259
2260         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2261         if (ret)
2262                 dev_err(&hdev->pdev->dev,
2263                         "rx private buffer alloc cmd failed %d\n", ret);
2264
2265         return ret;
2266 }
2267
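/* hclge_rx_priv_wl_config: program the high/low waterlines of the per-TC
 * private rx buffers.  The TC entries are spread over two descriptors,
 * each carrying HCLGE_TC_NUM_ONE_DESC TCs, sent as one chained command.
 */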
2268 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2269                                    struct hclge_pkt_buf_alloc *buf_alloc)
2270 {
2271         struct hclge_rx_priv_wl_buf *req;
2272         struct hclge_priv_buf *priv;
2273         struct hclge_desc desc[2];
2274         int i, j;
2275         int ret;
2276
2277         for (i = 0; i < 2; i++) {
2278                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2279                                            false);
2280                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2281
2282                 /* The first descriptor sets the NEXT bit to 1 */
2283                 if (i == 0)
2284                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2285                 else
2286                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2287
2288                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2289                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2290
2291                         priv = &buf_alloc->priv_buf[idx];
2292                         req->tc_wl[j].high =
2293                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2294                         req->tc_wl[j].high |=
2295                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2296                         req->tc_wl[j].low =
2297                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2298                         req->tc_wl[j].low |=
2299                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2300                 }
2301         }
2302
2303         /* Send 2 descriptors at one time */
2304         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2305         if (ret)
2306                 dev_err(&hdev->pdev->dev,
2307                         "rx private waterline config cmd failed %d\n",
2308                         ret);
2309         return ret;
2310 }
2311
2312 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2313                                     struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2316         struct hclge_rx_com_thrd *req;
2317         struct hclge_desc desc[2];
2318         struct hclge_tc_thrd *tc;
2319         int i, j;
2320         int ret;
2321
2322         for (i = 0; i < 2; i++) {
2323                 hclge_cmd_setup_basic_desc(&desc[i],
2324                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2325                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2326
2327                 /* The first descriptor sets the NEXT bit to 1 */
2328                 if (i == 0)
2329                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330                 else
2331                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2332
2333                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2334                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2335
2336                         req->com_thrd[j].high =
2337                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2338                         req->com_thrd[j].high |=
2339                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340                         req->com_thrd[j].low =
2341                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2342                         req->com_thrd[j].low |=
2343                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344                 }
2345         }
2346
2347         /* Send 2 descriptors at one time */
2348         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349         if (ret)
2350                 dev_err(&hdev->pdev->dev,
2351                         "common threshold config cmd failed %d\n", ret);
2352         return ret;
2353 }
2354
2355 static int hclge_common_wl_config(struct hclge_dev *hdev,
2356                                   struct hclge_pkt_buf_alloc *buf_alloc)
2357 {
2358         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2359         struct hclge_rx_com_wl *req;
2360         struct hclge_desc desc;
2361         int ret;
2362
2363         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2364
2365         req = (struct hclge_rx_com_wl *)desc.data;
2366         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2367         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368
2369         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2370         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2371
2372         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2373         if (ret)
2374                 dev_err(&hdev->pdev->dev,
2375                         "common waterline config cmd failed %d\n", ret);
2376
2377         return ret;
2378 }
2379
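/* hclge_buffer_alloc: split the packet buffer between tx and rx.  Tx
 * buffers are calculated and programmed first, then the rx private
 * buffers; on DCB capable devices the private waterlines and common
 * thresholds are configured as well, and finally the common (shared
 * buffer) waterline is written to hardware.
 */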
2380 int hclge_buffer_alloc(struct hclge_dev *hdev)
2381 {
2382         struct hclge_pkt_buf_alloc *pkt_buf;
2383         int ret;
2384
2385         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2386         if (!pkt_buf)
2387                 return -ENOMEM;
2388
2389         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "could not calc tx buffer size for all TCs %d\n", ret);
2393                 goto out;
2394         }
2395
2396         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2397         if (ret) {
2398                 dev_err(&hdev->pdev->dev,
2399                         "could not alloc tx buffers %d\n", ret);
2400                 goto out;
2401         }
2402
2403         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2404         if (ret) {
2405                 dev_err(&hdev->pdev->dev,
2406                         "could not calc rx priv buffer size for all TCs %d\n",
2407                         ret);
2408                 goto out;
2409         }
2410
2411         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2412         if (ret) {
2413                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2414                         ret);
2415                 goto out;
2416         }
2417
2418         if (hnae3_dev_dcb_supported(hdev)) {
2419                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2420                 if (ret) {
2421                         dev_err(&hdev->pdev->dev,
2422                                 "could not configure rx private waterline %d\n",
2423                                 ret);
2424                         goto out;
2425                 }
2426
2427                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2428                 if (ret) {
2429                         dev_err(&hdev->pdev->dev,
2430                                 "could not configure common threshold %d\n",
2431                                 ret);
2432                         goto out;
2433                 }
2434         }
2435
2436         ret = hclge_common_wl_config(hdev, pkt_buf);
2437         if (ret)
2438                 dev_err(&hdev->pdev->dev,
2439                         "could not configure common waterline %d\n", ret);
2440
2441 out:
2442         kfree(pkt_buf);
2443         return ret;
2444 }
2445
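/* hclge_init_roce_base_info: fill the RoCE handle with its vector count,
 * base vector and the io/mem bases shared with the NIC handle.  Fails
 * when the MSI budget cannot cover both the NIC and RoCE vectors.
 */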
2446 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2447 {
2448         struct hnae3_handle *roce = &vport->roce;
2449         struct hnae3_handle *nic = &vport->nic;
2450         struct hclge_dev *hdev = vport->back;
2451
2452         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2453
2454         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2455                 return -EINVAL;
2456
2457         roce->rinfo.base_vector = hdev->roce_base_vector;
2458
2459         roce->rinfo.netdev = nic->kinfo.netdev;
2460         roce->rinfo.roce_io_base = hdev->hw.io_base;
2461         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2462
2463         roce->pdev = nic->pdev;
2464         roce->ae_algo = nic->ae_algo;
2465         roce->numa_node_mask = nic->numa_node_mask;
2466
2467         return 0;
2468 }
2469
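/* hclge_init_msi: allocate between HNAE3_MIN_VECTOR_NUM and num_msi
 * MSI/MSI-X vectors, shrink num_msi to what was actually granted and
 * set up the vector status/irq bookkeeping arrays.
 */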
2470 static int hclge_init_msi(struct hclge_dev *hdev)
2471 {
2472         struct pci_dev *pdev = hdev->pdev;
2473         int vectors;
2474         int i;
2475
2476         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2477                                         hdev->num_msi,
2478                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2479         if (vectors < 0) {
2480                 dev_err(&pdev->dev,
2481                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2482                         vectors);
2483                 return vectors;
2484         }
2485         if (vectors < hdev->num_msi)
2486                 dev_warn(&hdev->pdev->dev,
2487                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2488                          hdev->num_msi, vectors);
2489
2490         hdev->num_msi = vectors;
2491         hdev->num_msi_left = vectors;
2492
2493         hdev->base_msi_vector = pdev->irq;
2494         hdev->roce_base_vector = hdev->base_msi_vector +
2495                                 hdev->num_nic_msi;
2496
2497         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2498                                            sizeof(u16), GFP_KERNEL);
2499         if (!hdev->vector_status) {
2500                 pci_free_irq_vectors(pdev);
2501                 return -ENOMEM;
2502         }
2503
2504         for (i = 0; i < hdev->num_msi; i++)
2505                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2506
2507         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2508                                         sizeof(int), GFP_KERNEL);
2509         if (!hdev->vector_irq) {
2510                 pci_free_irq_vectors(pdev);
2511                 return -ENOMEM;
2512         }
2513
2514         return 0;
2515 }
2516
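/* Only 10M and 100M links may run half duplex; force full duplex for
 * every other speed.
 */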
2517 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2518 {
2519         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2520                 duplex = HCLGE_MAC_FULL;
2521
2522         return duplex;
2523 }
2524
2525 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2526                                       u8 duplex)
2527 {
2528         struct hclge_config_mac_speed_dup_cmd *req;
2529         struct hclge_desc desc;
2530         int ret;
2531
2532         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2533
2534         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2535
2536         if (duplex)
2537                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2538
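        /* map the MAC speed to the encoding used by the speed/duplex command
         * (1G -> 0, 10G -> 1, ..., 100G -> 5, 10M -> 6, 100M -> 7, 200G -> 8)
         */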
2539         switch (speed) {
2540         case HCLGE_MAC_SPEED_10M:
2541                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542                                 HCLGE_CFG_SPEED_S, 6);
2543                 break;
2544         case HCLGE_MAC_SPEED_100M:
2545                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546                                 HCLGE_CFG_SPEED_S, 7);
2547                 break;
2548         case HCLGE_MAC_SPEED_1G:
2549                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550                                 HCLGE_CFG_SPEED_S, 0);
2551                 break;
2552         case HCLGE_MAC_SPEED_10G:
2553                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554                                 HCLGE_CFG_SPEED_S, 1);
2555                 break;
2556         case HCLGE_MAC_SPEED_25G:
2557                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558                                 HCLGE_CFG_SPEED_S, 2);
2559                 break;
2560         case HCLGE_MAC_SPEED_40G:
2561                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562                                 HCLGE_CFG_SPEED_S, 3);
2563                 break;
2564         case HCLGE_MAC_SPEED_50G:
2565                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566                                 HCLGE_CFG_SPEED_S, 4);
2567                 break;
2568         case HCLGE_MAC_SPEED_100G:
2569                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570                                 HCLGE_CFG_SPEED_S, 5);
2571                 break;
2572         case HCLGE_MAC_SPEED_200G:
2573                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574                                 HCLGE_CFG_SPEED_S, 8);
2575                 break;
2576         default:
2577                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2578                 return -EINVAL;
2579         }
2580
2581         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2582                       1);
2583
2584         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2585         if (ret) {
2586                 dev_err(&hdev->pdev->dev,
2587                         "mac speed/duplex config cmd failed %d.\n", ret);
2588                 return ret;
2589         }
2590
2591         return 0;
2592 }
2593
2594 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2595 {
2596         struct hclge_mac *mac = &hdev->hw.mac;
2597         int ret;
2598
2599         duplex = hclge_check_speed_dup(duplex, speed);
2600         if (!mac->support_autoneg && mac->speed == speed &&
2601             mac->duplex == duplex)
2602                 return 0;
2603
2604         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2605         if (ret)
2606                 return ret;
2607
2608         hdev->hw.mac.speed = speed;
2609         hdev->hw.mac.duplex = duplex;
2610
2611         return 0;
2612 }
2613
2614 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2615                                      u8 duplex)
2616 {
2617         struct hclge_vport *vport = hclge_get_vport(handle);
2618         struct hclge_dev *hdev = vport->back;
2619
2620         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2621 }
2622
2623 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2624 {
2625         struct hclge_config_auto_neg_cmd *req;
2626         struct hclge_desc desc;
2627         u32 flag = 0;
2628         int ret;
2629
2630         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2631
2632         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2633         if (enable)
2634                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2635         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2636
2637         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2638         if (ret)
2639                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2640                         ret);
2641
2642         return ret;
2643 }
2644
2645 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2646 {
2647         struct hclge_vport *vport = hclge_get_vport(handle);
2648         struct hclge_dev *hdev = vport->back;
2649
2650         if (!hdev->hw.mac.support_autoneg) {
2651                 if (enable) {
2652                         dev_err(&hdev->pdev->dev,
2653                                 "autoneg is not supported by current port\n");
2654                         return -EOPNOTSUPP;
2655                 } else {
2656                         return 0;
2657                 }
2658         }
2659
2660         return hclge_set_autoneg_en(hdev, enable);
2661 }
2662
2663 static int hclge_get_autoneg(struct hnae3_handle *handle)
2664 {
2665         struct hclge_vport *vport = hclge_get_vport(handle);
2666         struct hclge_dev *hdev = vport->back;
2667         struct phy_device *phydev = hdev->hw.mac.phydev;
2668
2669         if (phydev)
2670                 return phydev->autoneg;
2671
2672         return hdev->hw.mac.autoneg;
2673 }
2674
2675 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2676 {
2677         struct hclge_vport *vport = hclge_get_vport(handle);
2678         struct hclge_dev *hdev = vport->back;
2679         int ret;
2680
2681         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2682
2683         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2684         if (ret)
2685                 return ret;
2686         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2687 }
2688
2689 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2690 {
2691         struct hclge_vport *vport = hclge_get_vport(handle);
2692         struct hclge_dev *hdev = vport->back;
2693
2694         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2695                 return hclge_set_autoneg_en(hdev, !halt);
2696
2697         return 0;
2698 }
2699
2700 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2701 {
2702         struct hclge_config_fec_cmd *req;
2703         struct hclge_desc desc;
2704         int ret;
2705
2706         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2707
2708         req = (struct hclge_config_fec_cmd *)desc.data;
2709         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2710                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2711         if (fec_mode & BIT(HNAE3_FEC_RS))
2712                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2713                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2714         if (fec_mode & BIT(HNAE3_FEC_BASER))
2715                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2716                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2717
2718         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2719         if (ret)
2720                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2721
2722         return ret;
2723 }
2724
2725 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2726 {
2727         struct hclge_vport *vport = hclge_get_vport(handle);
2728         struct hclge_dev *hdev = vport->back;
2729         struct hclge_mac *mac = &hdev->hw.mac;
2730         int ret;
2731
2732         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2733                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2734                 return -EINVAL;
2735         }
2736
2737         ret = hclge_set_fec_hw(hdev, fec_mode);
2738         if (ret)
2739                 return ret;
2740
2741         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2742         return 0;
2743 }
2744
2745 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2746                           u8 *fec_mode)
2747 {
2748         struct hclge_vport *vport = hclge_get_vport(handle);
2749         struct hclge_dev *hdev = vport->back;
2750         struct hclge_mac *mac = &hdev->hw.mac;
2751
2752         if (fec_ability)
2753                 *fec_ability = mac->fec_ability;
2754         if (fec_mode)
2755                 *fec_mode = mac->fec_mode;
2756 }
2757
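/* hclge_mac_init() brings the MAC back to a known state after (re)init:
 * it forces full duplex at the configured speed, restores the autoneg and
 * user requested FEC settings when supported, clears the software link
 * state, programs the MTU and default loopback, and finally reallocates
 * the packet buffers.
 */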
2758 static int hclge_mac_init(struct hclge_dev *hdev)
2759 {
2760         struct hclge_mac *mac = &hdev->hw.mac;
2761         int ret;
2762
2763         hdev->support_sfp_query = true;
2764         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2765         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2766                                          hdev->hw.mac.duplex);
2767         if (ret)
2768                 return ret;
2769
2770         if (hdev->hw.mac.support_autoneg) {
2771                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2772                 if (ret)
2773                         return ret;
2774         }
2775
2776         mac->link = 0;
2777
2778         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2779                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2780                 if (ret)
2781                         return ret;
2782         }
2783
2784         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2785         if (ret) {
2786                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2787                 return ret;
2788         }
2789
2790         ret = hclge_set_default_loopback(hdev);
2791         if (ret)
2792                 return ret;
2793
2794         ret = hclge_buffer_alloc(hdev);
2795         if (ret)
2796                 dev_err(&hdev->pdev->dev,
2797                         "allocate buffer fail, ret=%d\n", ret);
2798
2799         return ret;
2800 }
2801
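/* The three scheduling helpers below all funnel into the same delayed work
 * (hdev->service_task) on the hclge workqueue, queued on the first CPU of
 * hdev->affinity_mask. The mailbox and reset variants additionally set a
 * state bit so the service task knows which sub-task was requested.
 */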
2802 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2803 {
2804         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2806                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807                                     hclge_wq, &hdev->service_task, 0);
2808 }
2809
2810 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2811 {
2812         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2813             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2814                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2815                                     hclge_wq, &hdev->service_task, 0);
2816 }
2817
2818 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2819 {
2820         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2821             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2822                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2823                                     hclge_wq, &hdev->service_task,
2824                                     delay_time);
2825 }
2826
2827 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2828 {
2829         struct hclge_link_status_cmd *req;
2830         struct hclge_desc desc;
2831         int ret;
2832
2833         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2834         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2835         if (ret) {
2836                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2837                         ret);
2838                 return ret;
2839         }
2840
2841         req = (struct hclge_link_status_cmd *)desc.data;
2842         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2843                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2844
2845         return 0;
2846 }
2847
2848 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2849 {
2850         struct phy_device *phydev = hdev->hw.mac.phydev;
2851
2852         *link_status = HCLGE_LINK_STATUS_DOWN;
2853
2854         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2855                 return 0;
2856
2857         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2858                 return 0;
2859
2860         return hclge_get_mac_link_status(hdev, link_status);
2861 }
2862
2863 static void hclge_update_link_status(struct hclge_dev *hdev)
2864 {
2865         struct hnae3_client *rclient = hdev->roce_client;
2866         struct hnae3_client *client = hdev->nic_client;
2867         struct hnae3_handle *rhandle;
2868         struct hnae3_handle *handle;
2869         int state;
2870         int ret;
2871         int i;
2872
2873         if (!client)
2874                 return;
2875
2876         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2877                 return;
2878
2879         ret = hclge_get_mac_phy_link(hdev, &state);
2880         if (ret) {
2881                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2882                 return;
2883         }
2884
2885         if (state != hdev->hw.mac.link) {
2886                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2887                         handle = &hdev->vport[i].nic;
2888                         client->ops->link_status_change(handle, state);
2889                         hclge_config_mac_tnl_int(hdev, state);
2890                         rhandle = &hdev->vport[i].roce;
2891                         if (rclient && rclient->ops->link_status_change)
2892                                 rclient->ops->link_status_change(rhandle,
2893                                                                  state);
2894                 }
2895                 hdev->hw.mac.link = state;
2896         }
2897
2898         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2899 }
2900
2901 static void hclge_update_port_capability(struct hclge_dev *hdev,
2902                                          struct hclge_mac *mac)
2903 {
2904         if (hnae3_dev_fec_supported(hdev))
2905                 /* update fec ability by speed */
2906                 hclge_convert_setting_fec(mac);
2907
2908         /* firmware can not identify the backplane type, the media type
2909          * read from the configuration can help deal with it
2910          */
2911         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2912             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2913                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2914         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2915                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2916
2917         if (mac->support_autoneg) {
2918                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2919                 linkmode_copy(mac->advertising, mac->supported);
2920         } else {
2921                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2922                                    mac->supported);
2923                 linkmode_zero(mac->advertising);
2924         }
2925 }
2926
2927 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2928 {
2929         struct hclge_sfp_info_cmd *resp;
2930         struct hclge_desc desc;
2931         int ret;
2932
2933         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2934         resp = (struct hclge_sfp_info_cmd *)desc.data;
2935         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2936         if (ret == -EOPNOTSUPP) {
2937                 dev_warn(&hdev->pdev->dev,
2938                          "IMP does not support getting SFP speed %d\n", ret);
2939                 return ret;
2940         } else if (ret) {
2941                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2942                 return ret;
2943         }
2944
2945         *speed = le32_to_cpu(resp->speed);
2946
2947         return 0;
2948 }
2949
2950 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2951 {
2952         struct hclge_sfp_info_cmd *resp;
2953         struct hclge_desc desc;
2954         int ret;
2955
2956         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2957         resp = (struct hclge_sfp_info_cmd *)desc.data;
2958
2959         resp->query_type = QUERY_ACTIVE_SPEED;
2960
2961         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2962         if (ret == -EOPNOTSUPP) {
2963                 dev_warn(&hdev->pdev->dev,
2964                          "IMP does not support getting SFP info %d\n", ret);
2965                 return ret;
2966         } else if (ret) {
2967                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2968                 return ret;
2969         }
2970
2971         /* In some cases, the mac speed got from IMP may be 0, and it
2972          * should not be set to mac->speed.
2973          */
2974         if (!le32_to_cpu(resp->speed))
2975                 return 0;
2976
2977         mac->speed = le32_to_cpu(resp->speed);
2978         /* if resp->speed_ability is 0, it means the firmware is an old
2979          * version, so do not update these params
2980          */
2981         if (resp->speed_ability) {
2982                 mac->module_type = le32_to_cpu(resp->module_type);
2983                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2984                 mac->autoneg = resp->autoneg;
2985                 mac->support_autoneg = resp->autoneg_ability;
2986                 mac->speed_type = QUERY_ACTIVE_SPEED;
2987                 if (!resp->active_fec)
2988                         mac->fec_mode = 0;
2989                 else
2990                         mac->fec_mode = BIT(resp->active_fec);
2991         } else {
2992                 mac->speed_type = QUERY_SFP_SPEED;
2993         }
2994
2995         return 0;
2996 }
2997
2998 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2999                                         struct ethtool_link_ksettings *cmd)
3000 {
3001         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3002         struct hclge_vport *vport = hclge_get_vport(handle);
3003         struct hclge_phy_link_ksetting_0_cmd *req0;
3004         struct hclge_phy_link_ksetting_1_cmd *req1;
3005         u32 supported, advertising, lp_advertising;
3006         struct hclge_dev *hdev = vport->back;
3007         int ret;
3008
3009         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3010                                    true);
3011         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3012         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3013                                    true);
3014
3015         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3016         if (ret) {
3017                 dev_err(&hdev->pdev->dev,
3018                         "failed to get phy link ksetting, ret = %d.\n", ret);
3019                 return ret;
3020         }
3021
3022         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3023         cmd->base.autoneg = req0->autoneg;
3024         cmd->base.speed = le32_to_cpu(req0->speed);
3025         cmd->base.duplex = req0->duplex;
3026         cmd->base.port = req0->port;
3027         cmd->base.transceiver = req0->transceiver;
3028         cmd->base.phy_address = req0->phy_address;
3029         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3030         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3031         supported = le32_to_cpu(req0->supported);
3032         advertising = le32_to_cpu(req0->advertising);
3033         lp_advertising = le32_to_cpu(req0->lp_advertising);
3034         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3035                                                 supported);
3036         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3037                                                 advertising);
3038         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3039                                                 lp_advertising);
3040
3041         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3042         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3043         cmd->base.master_slave_state = req1->master_slave_state;
3044
3045         return 0;
3046 }
3047
3048 static int
3049 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3050                              const struct ethtool_link_ksettings *cmd)
3051 {
3052         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3053         struct hclge_vport *vport = hclge_get_vport(handle);
3054         struct hclge_phy_link_ksetting_0_cmd *req0;
3055         struct hclge_phy_link_ksetting_1_cmd *req1;
3056         struct hclge_dev *hdev = vport->back;
3057         u32 advertising;
3058         int ret;
3059
3060         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3061             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3062              (cmd->base.duplex != DUPLEX_HALF &&
3063               cmd->base.duplex != DUPLEX_FULL)))
3064                 return -EINVAL;
3065
3066         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3067                                    false);
3068         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3069         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3070                                    false);
3071
3072         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3073         req0->autoneg = cmd->base.autoneg;
3074         req0->speed = cpu_to_le32(cmd->base.speed);
3075         req0->duplex = cmd->base.duplex;
3076         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3077                                                 cmd->link_modes.advertising);
3078         req0->advertising = cpu_to_le32(advertising);
3079         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3080
3081         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3082         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3083
3084         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3085         if (ret) {
3086                 dev_err(&hdev->pdev->dev,
3087                         "failed to set phy link ksettings, ret = %d.\n", ret);
3088                 return ret;
3089         }
3090
3091         hdev->hw.mac.autoneg = cmd->base.autoneg;
3092         hdev->hw.mac.speed = cmd->base.speed;
3093         hdev->hw.mac.duplex = cmd->base.duplex;
3094         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3095
3096         return 0;
3097 }
3098
3099 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3100 {
3101         struct ethtool_link_ksettings cmd;
3102         int ret;
3103
3104         if (!hnae3_dev_phy_imp_supported(hdev))
3105                 return 0;
3106
3107         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3108         if (ret)
3109                 return ret;
3110
3111         hdev->hw.mac.autoneg = cmd.base.autoneg;
3112         hdev->hw.mac.speed = cmd.base.speed;
3113         hdev->hw.mac.duplex = cmd.base.duplex;
3114
3115         return 0;
3116 }
3117
3118 static int hclge_tp_port_init(struct hclge_dev *hdev)
3119 {
3120         struct ethtool_link_ksettings cmd;
3121
3122         if (!hnae3_dev_phy_imp_supported(hdev))
3123                 return 0;
3124
3125         cmd.base.autoneg = hdev->hw.mac.autoneg;
3126         cmd.base.speed = hdev->hw.mac.speed;
3127         cmd.base.duplex = hdev->hw.mac.duplex;
3128         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3129
3130         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3131 }
3132
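/* hclge_update_port_info() refreshes the port's speed related information:
 * copper ports go through hclge_update_tp_port_info(), V2 and newer devices
 * query the full SFP info (speed, module type, autoneg and FEC ability) and
 * older devices only query the SFP speed. A -EOPNOTSUPP answer from the
 * firmware disables further SFP queries via hdev->support_sfp_query.
 */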
3133 static int hclge_update_port_info(struct hclge_dev *hdev)
3134 {
3135         struct hclge_mac *mac = &hdev->hw.mac;
3136         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3137         int ret;
3138
3139         /* get the port info from SFP cmd if not copper port */
3140         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3141                 return hclge_update_tp_port_info(hdev);
3142
3143         /* if IMP does not support getting SFP/qSFP info, return directly */
3144         if (!hdev->support_sfp_query)
3145                 return 0;
3146
3147         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3148                 ret = hclge_get_sfp_info(hdev, mac);
3149         else
3150                 ret = hclge_get_sfp_speed(hdev, &speed);
3151
3152         if (ret == -EOPNOTSUPP) {
3153                 hdev->support_sfp_query = false;
3154                 return ret;
3155         } else if (ret) {
3156                 return ret;
3157         }
3158
3159         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3160                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3161                         hclge_update_port_capability(hdev, mac);
3162                         return 0;
3163                 }
3164                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3165                                                HCLGE_MAC_FULL);
3166         } else {
3167                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3168                         return 0; /* do nothing if no SFP */
3169
3170                 /* must config full duplex for SFP */
3171                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3172         }
3173 }
3174
3175 static int hclge_get_status(struct hnae3_handle *handle)
3176 {
3177         struct hclge_vport *vport = hclge_get_vport(handle);
3178         struct hclge_dev *hdev = vport->back;
3179
3180         hclge_update_link_status(hdev);
3181
3182         return hdev->hw.mac.link;
3183 }
3184
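/* hclge_get_vf_vport() maps a user visible VF index to its vport entry.
 * Since vport[0] belongs to the PF, VF n lives at
 * hdev->vport[n + HCLGE_VF_VPORT_START_NUM]; e.g. VF 0 resolves to
 * vport[1], assuming HCLGE_VF_VPORT_START_NUM is 1 (the constant is
 * defined elsewhere in this driver).
 */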
3185 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3186 {
3187         if (!pci_num_vf(hdev->pdev)) {
3188                 dev_err(&hdev->pdev->dev,
3189                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3190                 return NULL;
3191         }
3192
3193         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3194                 dev_err(&hdev->pdev->dev,
3195                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3196                         vf, pci_num_vf(hdev->pdev));
3197                 return NULL;
3198         }
3199
3200         /* VFs start from 1 in vport */
3201         vf += HCLGE_VF_VPORT_START_NUM;
3202         return &hdev->vport[vf];
3203 }
3204
3205 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3206                                struct ifla_vf_info *ivf)
3207 {
3208         struct hclge_vport *vport = hclge_get_vport(handle);
3209         struct hclge_dev *hdev = vport->back;
3210
3211         vport = hclge_get_vf_vport(hdev, vf);
3212         if (!vport)
3213                 return -EINVAL;
3214
3215         ivf->vf = vf;
3216         ivf->linkstate = vport->vf_info.link_state;
3217         ivf->spoofchk = vport->vf_info.spoofchk;
3218         ivf->trusted = vport->vf_info.trusted;
3219         ivf->min_tx_rate = 0;
3220         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3221         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3222         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3223         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3224         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3225
3226         return 0;
3227 }
3228
3229 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3230                                    int link_state)
3231 {
3232         struct hclge_vport *vport = hclge_get_vport(handle);
3233         struct hclge_dev *hdev = vport->back;
3234
3235         vport = hclge_get_vf_vport(hdev, vf);
3236         if (!vport)
3237                 return -EINVAL;
3238
3239         vport->vf_info.link_state = link_state;
3240
3241         return 0;
3242 }
3243
3244 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3245 {
3246         u32 cmdq_src_reg, msix_src_reg;
3247
3248         /* fetch the events from their corresponding regs */
3249         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3250         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3251
3252         /* Assumption: if by any chance reset and mailbox events are reported
3253          * together, then we will only process the reset event in this pass and
3254          * defer the processing of the mailbox events. Since we have not
3255          * cleared the RX CMDQ event this time, we will receive another
3256          * interrupt from H/W just for the mailbox.
3257          *
3258          * check for vector0 reset event sources
3259          */
3260         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3261                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3262                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3263                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3264                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3265                 hdev->rst_stats.imp_rst_cnt++;
3266                 return HCLGE_VECTOR0_EVENT_RST;
3267         }
3268
3269         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3270                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3271                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3272                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3273                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3274                 hdev->rst_stats.global_rst_cnt++;
3275                 return HCLGE_VECTOR0_EVENT_RST;
3276         }
3277
3278         /* check for vector0 msix event source */
3279         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3280                 *clearval = msix_src_reg;
3281                 return HCLGE_VECTOR0_EVENT_ERR;
3282         }
3283
3284         /* check for vector0 mailbox(=CMDQ RX) event source */
3285         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3286                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3287                 *clearval = cmdq_src_reg;
3288                 return HCLGE_VECTOR0_EVENT_MBX;
3289         }
3290
3291         /* print other vector0 event source */
3292         dev_info(&hdev->pdev->dev,
3293                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3294                  cmdq_src_reg, msix_src_reg);
3295         *clearval = msix_src_reg;
3296
3297         return HCLGE_VECTOR0_EVENT_OTHER;
3298 }
3299
3300 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3301                                     u32 regclr)
3302 {
3303         switch (event_type) {
3304         case HCLGE_VECTOR0_EVENT_RST:
3305                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3306                 break;
3307         case HCLGE_VECTOR0_EVENT_MBX:
3308                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3309                 break;
3310         default:
3311                 break;
3312         }
3313 }
3314
3315 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3316 {
3317         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3318                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3319                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3320                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3321         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3322 }
3323
3324 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3325 {
3326         writel(enable ? 1 : 0, vector->addr);
3327 }
3328
3329 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3330 {
3331         struct hclge_dev *hdev = data;
3332         u32 clearval = 0;
3333         u32 event_cause;
3334
3335         hclge_enable_vector(&hdev->misc_vector, false);
3336         event_cause = hclge_check_event_cause(hdev, &clearval);
3337
3338         /* vector0 interrupt is shared with reset and mailbox source events. */
3339         switch (event_cause) {
3340         case HCLGE_VECTOR0_EVENT_ERR:
3341                 /* we do not know what type of reset is required now. This could
3342                  * only be decided after we fetch the type of errors which
3343                  * caused this event. Therefore, we will do the below for now:
3344                  * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means we
3345                  *    have deferred the type of reset to be used.
3346                  * 2. Schedule the reset service task.
3347                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
3348                  *    will fetch the correct type of reset. This would be done
3349                  *    by first decoding the types of errors.
3350                  */
3351                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3352                 fallthrough;
3353         case HCLGE_VECTOR0_EVENT_RST:
3354                 hclge_reset_task_schedule(hdev);
3355                 break;
3356         case HCLGE_VECTOR0_EVENT_MBX:
3357                 /* If we are here, then
3358                  * 1. either we are not handling any mbx task and we are not
3359                  *    scheduled as well,
3360                  *                        OR
3361                  * 2. we could be handling an mbx task but nothing more is
3362                  *    scheduled.
3363                  * In both cases, we should schedule the mbx task as there are more
3364                  * mbx messages reported by this interrupt.
3365                  */
3366                 hclge_mbx_task_schedule(hdev);
3367                 break;
3368         default:
3369                 dev_warn(&hdev->pdev->dev,
3370                          "received unknown or unhandled event of vector0\n");
3371                 break;
3372         }
3373
3374         hclge_clear_event_cause(hdev, event_cause, clearval);
3375
3376         /* Enable the interrupt if it is not caused by reset. And when
3377          * clearval equals 0, it means the interrupt status may have been
3378          * cleared by hardware before the driver read the status register.
3379          * For this case, the vector0 interrupt should also be enabled.
3380          */
3381         if (!clearval ||
3382             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3383                 hclge_enable_vector(&hdev->misc_vector, true);
3384         }
3385
3386         return IRQ_HANDLED;
3387 }
3388
3389 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3390 {
3391         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3392                 dev_warn(&hdev->pdev->dev,
3393                          "vector(vector_id %d) has been freed.\n", vector_id);
3394                 return;
3395         }
3396
3397         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3398         hdev->num_msi_left += 1;
3399         hdev->num_msi_used -= 1;
3400 }
3401
3402 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3403 {
3404         struct hclge_misc_vector *vector = &hdev->misc_vector;
3405
3406         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3407
3408         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3409         hdev->vector_status[0] = 0;
3410
3411         hdev->num_msi_left -= 1;
3412         hdev->num_msi_used += 1;
3413 }
3414
3415 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3416                                       const cpumask_t *mask)
3417 {
3418         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3419                                               affinity_notify);
3420
3421         cpumask_copy(&hdev->affinity_mask, mask);
3422 }
3423
3424 static void hclge_irq_affinity_release(struct kref *ref)
3425 {
3426 }
3427
3428 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3429 {
3430         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3431                               &hdev->affinity_mask);
3432
3433         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3434         hdev->affinity_notify.release = hclge_irq_affinity_release;
3435         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3436                                   &hdev->affinity_notify);
3437 }
3438
3439 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3440 {
3441         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3442         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3443 }
3444
3445 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3446 {
3447         int ret;
3448
3449         hclge_get_misc_vector(hdev);
3450
3451         /* this would be explicitly freed in the end */
3452         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3453                  HCLGE_NAME, pci_name(hdev->pdev));
3454         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3455                           0, hdev->misc_vector.name, hdev);
3456         if (ret) {
3457                 hclge_free_vector(hdev, 0);
3458                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3459                         hdev->misc_vector.vector_irq);
3460         }
3461
3462         return ret;
3463 }
3464
3465 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3466 {
3467         free_irq(hdev->misc_vector.vector_irq, hdev);
3468         hclge_free_vector(hdev, 0);
3469 }
3470
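/* hclge_notify_client() forwards a reset notification (e.g. down/up,
 * init/uninit) to the NIC client for the first num_vmdq_vport + 1 vport
 * handles. It returns 0 when no NIC client is registered, -EOPNOTSUPP when
 * the client has no reset_notify op, and otherwise the first error
 * returned by the client.
 */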
3471 int hclge_notify_client(struct hclge_dev *hdev,
3472                         enum hnae3_reset_notify_type type)
3473 {
3474         struct hnae3_client *client = hdev->nic_client;
3475         u16 i;
3476
3477         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3478                 return 0;
3479
3480         if (!client->ops->reset_notify)
3481                 return -EOPNOTSUPP;
3482
3483         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3484                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3485                 int ret;
3486
3487                 ret = client->ops->reset_notify(handle, type);
3488                 if (ret) {
3489                         dev_err(&hdev->pdev->dev,
3490                                 "notify nic client failed %d(%d)\n", type, ret);
3491                         return ret;
3492                 }
3493         }
3494
3495         return 0;
3496 }
3497
3498 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3499                                     enum hnae3_reset_notify_type type)
3500 {
3501         struct hnae3_client *client = hdev->roce_client;
3502         int ret;
3503         u16 i;
3504
3505         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3506                 return 0;
3507
3508         if (!client->ops->reset_notify)
3509                 return -EOPNOTSUPP;
3510
3511         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3512                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3513
3514                 ret = client->ops->reset_notify(handle, type);
3515                 if (ret) {
3516                         dev_err(&hdev->pdev->dev,
3517                                 "notify roce client failed %d(%d)",
3518                                 type, ret);
3519                         return ret;
3520                 }
3521         }
3522
3523         return ret;
3524 }
3525
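/* hclge_reset_wait() polls the reset status register that matches the
 * current reset type and returns -EBUSY if the hardware has not cleared
 * the corresponding reset bit within HCLGE_RESET_WAIT_CNT polling
 * iterations (350 * 100 ms, i.e. roughly 35 seconds).
 */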
3526 static int hclge_reset_wait(struct hclge_dev *hdev)
3527 {
3528 #define HCLGE_RESET_WAIT_MS     100
3529 #define HCLGE_RESET_WAIT_CNT    350
3530
3531         u32 val, reg, reg_bit;
3532         u32 cnt = 0;
3533
3534         switch (hdev->reset_type) {
3535         case HNAE3_IMP_RESET:
3536                 reg = HCLGE_GLOBAL_RESET_REG;
3537                 reg_bit = HCLGE_IMP_RESET_BIT;
3538                 break;
3539         case HNAE3_GLOBAL_RESET:
3540                 reg = HCLGE_GLOBAL_RESET_REG;
3541                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3542                 break;
3543         case HNAE3_FUNC_RESET:
3544                 reg = HCLGE_FUN_RST_ING;
3545                 reg_bit = HCLGE_FUN_RST_ING_B;
3546                 break;
3547         default:
3548                 dev_err(&hdev->pdev->dev,
3549                         "Wait for unsupported reset type: %d\n",
3550                         hdev->reset_type);
3551                 return -EINVAL;
3552         }
3553
3554         val = hclge_read_dev(&hdev->hw, reg);
3555         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3556                 msleep(HCLGE_RESET_WAIT_MS);
3557                 val = hclge_read_dev(&hdev->hw, reg);
3558                 cnt++;
3559         }
3560
3561         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3562                 dev_warn(&hdev->pdev->dev,
3563                          "Wait for reset timeout: %d\n", hdev->reset_type);
3564                 return -EBUSY;
3565         }
3566
3567         return 0;
3568 }
3569
3570 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3571 {
3572         struct hclge_vf_rst_cmd *req;
3573         struct hclge_desc desc;
3574
3575         req = (struct hclge_vf_rst_cmd *)desc.data;
3576         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3577         req->dest_vfid = func_id;
3578
3579         if (reset)
3580                 req->vf_rst = 0x1;
3581
3582         return hclge_cmd_send(&hdev->hw, &desc, 1);
3583 }
3584
3585 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3586 {
3587         int i;
3588
3589         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3590                 struct hclge_vport *vport = &hdev->vport[i];
3591                 int ret;
3592
3593                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3594                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3595                 if (ret) {
3596                         dev_err(&hdev->pdev->dev,
3597                                 "set vf(%u) rst failed %d!\n",
3598                                 vport->vport_id, ret);
3599                         return ret;
3600                 }
3601
3602                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3603                         continue;
3604
3605                 /* Inform the VF to process the reset.
3606                  * hclge_inform_reset_assert_to_vf may fail if the VF
3607                  * driver is not loaded.
3608                  */
3609                 ret = hclge_inform_reset_assert_to_vf(vport);
3610                 if (ret)
3611                         dev_warn(&hdev->pdev->dev,
3612                                  "inform reset to vf(%u) failed %d!\n",
3613                                  vport->vport_id, ret);
3614         }
3615
3616         return 0;
3617 }
3618
3619 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3620 {
3621         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3622             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3623             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3624                 return;
3625
3626         hclge_mbx_handler(hdev);
3627
3628         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3629 }
3630
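/* hclge_func_reset_sync_vf() polls HCLGE_OPC_QUERY_VF_RST_RDY until the
 * firmware reports that all VFs are ready for the reset, servicing the
 * mailbox in each iteration so the VFs can bring their netdevs down. Old
 * firmware that does not support the command gets a fixed 100 ms grace
 * period instead, and a timeout only produces a warning.
 */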
3631 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3632 {
3633         struct hclge_pf_rst_sync_cmd *req;
3634         struct hclge_desc desc;
3635         int cnt = 0;
3636         int ret;
3637
3638         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3639         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3640
3641         do {
3642                 /* VF needs to down the netdev by mbx during PF or FLR reset */
3643                 hclge_mailbox_service_task(hdev);
3644
3645                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3646                 /* for compatibility with old firmware, wait
3647                  * 100 ms for the VF to stop IO
3648                  */
3649                 if (ret == -EOPNOTSUPP) {
3650                         msleep(HCLGE_RESET_SYNC_TIME);
3651                         return;
3652                 } else if (ret) {
3653                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3654                                  ret);
3655                         return;
3656                 } else if (req->all_vf_ready) {
3657                         return;
3658                 }
3659                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3660                 hclge_cmd_reuse_desc(&desc, true);
3661         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3662
3663         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3664 }
3665
3666 void hclge_report_hw_error(struct hclge_dev *hdev,
3667                            enum hnae3_hw_error_type type)
3668 {
3669         struct hnae3_client *client = hdev->nic_client;
3670         u16 i;
3671
3672         if (!client || !client->ops->process_hw_error ||
3673             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3674                 return;
3675
3676         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3677                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3678 }
3679
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3681 {
3682         u32 reg_val;
3683
3684         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3689         }
3690
3691         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3695         }
3696 }
3697
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3699 {
3700         struct hclge_desc desc;
3701         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3702         int ret;
3703
3704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706         req->fun_reset_vfid = func_id;
3707
3708         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709         if (ret)
3710                 dev_err(&hdev->pdev->dev,
3711                         "send function reset cmd fail, status =%d\n", ret);
3712
3713         return ret;
3714 }
3715
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3717 {
3718         struct hnae3_handle *handle = &hdev->vport[0].nic;
3719         struct pci_dev *pdev = hdev->pdev;
3720         u32 val;
3721
3722         if (hclge_get_hw_reset_stat(handle)) {
3723                 dev_info(&pdev->dev, "hardware reset not finish\n");
3724                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3727                 return;
3728         }
3729
3730         switch (hdev->reset_type) {
3731         case HNAE3_GLOBAL_RESET:
3732                 dev_info(&pdev->dev, "global reset requested\n");
3733                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3736                 break;
3737         case HNAE3_FUNC_RESET:
3738                 dev_info(&pdev->dev, "PF reset requested\n");
3739                 /* schedule again to check later */
3740                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741                 hclge_reset_task_schedule(hdev);
3742                 break;
3743         default:
3744                 dev_warn(&pdev->dev,
3745                          "unsupported reset type: %d\n", hdev->reset_type);
3746                 break;
3747         }
3748 }
3749
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751                                                    unsigned long *addr)
3752 {
3753         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754         struct hclge_dev *hdev = ae_dev->priv;
3755
3756         /* first, resolve any unknown reset type to the known type(s) */
3757         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759                                         HCLGE_MISC_VECTOR_INT_STS);
3760                 /* we will intentionally ignore any errors from this function
3761                  * as we will end up in *some* reset request in any case
3762                  */
3763                 if (hclge_handle_hw_msix_error(hdev, addr))
3764                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3765                                  msix_sts_reg);
3766
3767                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768                 /* We deferred the clearing of the error event which caused the
3769                  * interrupt since it was not possible to do that in
3770                  * interrupt context (and this is the reason we introduced the
3771                  * new UNKNOWN reset type). Now that the errors have been
3772                  * handled and cleared in hardware, we can safely enable
3773                  * interrupts. This is an exception to the norm.
3774                  */
3775                 hclge_enable_vector(&hdev->misc_vector, true);
3776         }
3777
3778         /* return the highest priority reset level amongst all */
3779         if (test_bit(HNAE3_IMP_RESET, addr)) {
3780                 rst_level = HNAE3_IMP_RESET;
3781                 clear_bit(HNAE3_IMP_RESET, addr);
3782                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783                 clear_bit(HNAE3_FUNC_RESET, addr);
3784         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785                 rst_level = HNAE3_GLOBAL_RESET;
3786                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787                 clear_bit(HNAE3_FUNC_RESET, addr);
3788         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789                 rst_level = HNAE3_FUNC_RESET;
3790                 clear_bit(HNAE3_FUNC_RESET, addr);
3791         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792                 rst_level = HNAE3_FLR_RESET;
3793                 clear_bit(HNAE3_FLR_RESET, addr);
3794         }
3795
3796         if (hdev->reset_type != HNAE3_NONE_RESET &&
3797             rst_level < hdev->reset_type)
3798                 return HNAE3_NONE_RESET;
3799
3800         return rst_level;
3801 }
3802
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3804 {
3805         u32 clearval = 0;
3806
3807         switch (hdev->reset_type) {
3808         case HNAE3_IMP_RESET:
3809                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3810                 break;
3811         case HNAE3_GLOBAL_RESET:
3812                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3813                 break;
3814         default:
3815                 break;
3816         }
3817
3818         if (!clearval)
3819                 return;
3820
3821         /* For revision 0x20, the reset interrupt source
3822          * can only be cleared after the hardware reset is done
3823          */
3824         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3826                                 clearval);
3827
3828         hclge_enable_vector(&hdev->misc_vector, true);
3829 }
3830
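/* hclge_reset_handshake() toggles the HCLGE_NIC_SW_RST_RDY bit in the CSQ
 * depth register. Setting it tells the firmware that the driver has finished
 * its preparatory work and the hardware reset may proceed; clearing it after
 * re-initialization completes ends the handshake.
 */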
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3832 {
3833         u32 reg_val;
3834
3835         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3836         if (enable)
3837                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3838         else
3839                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3840
3841         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3842 }
3843
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3845 {
3846         int ret;
3847
3848         ret = hclge_set_all_vf_rst(hdev, true);
3849         if (ret)
3850                 return ret;
3851
3852         hclge_func_reset_sync_vf(hdev);
3853
3854         return 0;
3855 }
3856
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3858 {
3859         u32 reg_val;
3860         int ret = 0;
3861
3862         switch (hdev->reset_type) {
3863         case HNAE3_FUNC_RESET:
3864                 ret = hclge_func_reset_notify_vf(hdev);
3865                 if (ret)
3866                         return ret;
3867
3868                 ret = hclge_func_reset_cmd(hdev, 0);
3869                 if (ret) {
3870                         dev_err(&hdev->pdev->dev,
3871                                 "asserting function reset fail %d!\n", ret);
3872                         return ret;
3873                 }
3874
3875                 /* After performing a PF reset, it is not necessary to do the
3876                  * mailbox handling or send any command to the firmware, because
3877                  * any mailbox handling or command to the firmware is only valid
3878                  * after hclge_cmd_init is called.
3879                  */
3880                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881                 hdev->rst_stats.pf_rst_cnt++;
3882                 break;
3883         case HNAE3_FLR_RESET:
3884                 ret = hclge_func_reset_notify_vf(hdev);
3885                 if (ret)
3886                         return ret;
3887                 break;
3888         case HNAE3_IMP_RESET:
3889                 hclge_handle_imp_error(hdev);
3890                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3893                 break;
3894         default:
3895                 break;
3896         }
3897
3898         /* inform hardware that preparatory work is done */
3899         msleep(HCLGE_RESET_SYNC_TIME);
3900         hclge_reset_handshake(hdev, true);
3901         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3902
3903         return ret;
3904 }
3905
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3907 {
3908 #define MAX_RESET_FAIL_CNT 5
3909
3910         if (hdev->reset_pending) {
3911                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912                          hdev->reset_pending);
3913                 return true;
3914         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915                    HCLGE_RESET_INT_M) {
3916                 dev_info(&hdev->pdev->dev,
3917                          "reset failed because new reset interrupt\n");
3918                 hclge_clear_reset_cause(hdev);
3919                 return false;
3920         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921                 hdev->rst_stats.reset_fail_cnt++;
3922                 set_bit(hdev->reset_type, &hdev->reset_pending);
3923                 dev_info(&hdev->pdev->dev,
3924                          "re-schedule reset task(%u)\n",
3925                          hdev->rst_stats.reset_fail_cnt);
3926                 return true;
3927         }
3928
3929         hclge_clear_reset_cause(hdev);
3930
3931         /* recover the handshake status when the reset fails */
3932         hclge_reset_handshake(hdev, true);
3933
3934         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3935
3936         hclge_dbg_dump_rst_info(hdev);
3937
3938         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3939
3940         return false;
3941 }
3942
3943 static int hclge_set_rst_done(struct hclge_dev *hdev)
3944 {
3945         struct hclge_pf_rst_done_cmd *req;
3946         struct hclge_desc desc;
3947         int ret;
3948
3949         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3950         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3951         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3952
3953         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3954         /* To be compatible with the old firmware, which does not support
3955          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3956          * return success
3957          */
3958         if (ret == -EOPNOTSUPP) {
3959                 dev_warn(&hdev->pdev->dev,
3960                          "current firmware does not support command(0x%x)!\n",
3961                          HCLGE_OPC_PF_RST_DONE);
3962                 return 0;
3963         } else if (ret) {
3964                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3965                         ret);
3966         }
3967
3968         return ret;
3969 }
3970
3971 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3972 {
3973         int ret = 0;
3974
3975         switch (hdev->reset_type) {
3976         case HNAE3_FUNC_RESET:
3977         case HNAE3_FLR_RESET:
3978                 ret = hclge_set_all_vf_rst(hdev, false);
3979                 break;
3980         case HNAE3_GLOBAL_RESET:
3981         case HNAE3_IMP_RESET:
3982                 ret = hclge_set_rst_done(hdev);
3983                 break;
3984         default:
3985                 break;
3986         }
3987
3988         /* clear the handshake status after re-initialization is done */
3989         hclge_reset_handshake(hdev, false);
3990
3991         return ret;
3992 }
3993
3994 static int hclge_reset_stack(struct hclge_dev *hdev)
3995 {
3996         int ret;
3997
3998         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3999         if (ret)
4000                 return ret;
4001
4002         ret = hclge_reset_ae_dev(hdev->ae_dev);
4003         if (ret)
4004                 return ret;
4005
4006         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4007 }
4008
4009 static int hclge_reset_prepare(struct hclge_dev *hdev)
4010 {
4011         int ret;
4012
4013         hdev->rst_stats.reset_cnt++;
4014         /* perform reset of the stack & ae device for a client */
4015         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4016         if (ret)
4017                 return ret;
4018
4019         rtnl_lock();
4020         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4021         rtnl_unlock();
4022         if (ret)
4023                 return ret;
4024
4025         return hclge_reset_prepare_wait(hdev);
4026 }
4027
4028 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4029 {
4030         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031         enum hnae3_reset_type reset_level;
4032         int ret;
4033
4034         hdev->rst_stats.hw_reset_done_cnt++;
4035
4036         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4037         if (ret)
4038                 return ret;
4039
4040         rtnl_lock();
4041         ret = hclge_reset_stack(hdev);
4042         rtnl_unlock();
4043         if (ret)
4044                 return ret;
4045
4046         hclge_clear_reset_cause(hdev);
4047
4048         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4049         /* ignore the RoCE notify error if the reset has already failed
4050          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4051          */
4052         if (ret &&
4053             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4054                 return ret;
4055
4056         ret = hclge_reset_prepare_up(hdev);
4057         if (ret)
4058                 return ret;
4059
4060         rtnl_lock();
4061         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4062         rtnl_unlock();
4063         if (ret)
4064                 return ret;
4065
4066         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4067         if (ret)
4068                 return ret;
4069
4070         hdev->last_reset_time = jiffies;
4071         hdev->rst_stats.reset_fail_cnt = 0;
4072         hdev->rst_stats.reset_done_cnt++;
4073         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4074
4075         /* if default_reset_request has a higher level reset request,
4076          * it should be handled as soon as possible, since some errors
4077          * need this kind of reset to be fixed.
4078          */
4079         reset_level = hclge_get_reset_level(ae_dev,
4080                                             &hdev->default_reset_request);
4081         if (reset_level != HNAE3_NONE_RESET)
4082                 set_bit(reset_level, &hdev->reset_request);
4083
4084         return 0;
4085 }
4086
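/* hclge_reset() runs the reset in three phases: prepare (notify the RoCE
 * and NIC clients to go down and assert the reset), wait for the hardware
 * to finish, and rebuild (re-init the ae device and notify the clients back
 * up). If any phase fails, hclge_reset_err_handle() decides whether the
 * reset task should be rescheduled.
 */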
4087 static void hclge_reset(struct hclge_dev *hdev)
4088 {
4089         if (hclge_reset_prepare(hdev))
4090                 goto err_reset;
4091
4092         if (hclge_reset_wait(hdev))
4093                 goto err_reset;
4094
4095         if (hclge_reset_rebuild(hdev))
4096                 goto err_reset;
4097
4098         return;
4099
4100 err_reset:
4101         if (hclge_reset_err_handle(hdev))
4102                 hclge_reset_task_schedule(hdev);
4103 }
4104
4105 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4106 {
4107         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4108         struct hclge_dev *hdev = ae_dev->priv;
4109
4110         /* We might end up getting called broadly because of these 2 cases:
4111          * 1. A recoverable error was conveyed through APEI and the only way
4112          *    to bring back normalcy is to reset.
4113          * 2. A new reset request from the stack due to timeout
4114          *
4115          * For the first case, the error event might not have an ae handle
4116          * available. Check if this is a new reset request and we are not here
4117          * just because the last reset attempt did not succeed and the watchdog
4118          * hit us again. We will know this if the last reset request did not
4119          * occur very recently (watchdog timer = 5*HZ; let us check after a
4120          * sufficiently large time, say 4*5*HZ). In case of a new request we
4121          * reset the "reset level" to PF reset. And if it is a repeat of the
4122          * most recent reset request, we want to make sure we throttle it.
4123          * Therefore, we will not allow it again before 3*HZ has passed.
4124          */
4125         if (!handle)
4126                 handle = &hdev->vport[0].nic;
4127
4128         if (time_before(jiffies, (hdev->last_reset_time +
4129                                   HCLGE_RESET_INTERVAL))) {
4130                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4131                 return;
4132         } else if (hdev->default_reset_request) {
4133                 hdev->reset_level =
4134                         hclge_get_reset_level(ae_dev,
4135                                               &hdev->default_reset_request);
4136         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4137                 hdev->reset_level = HNAE3_FUNC_RESET;
4138         }
4139
4140         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4141                  hdev->reset_level);
4142
4143         /* request reset & schedule reset task */
4144         set_bit(hdev->reset_level, &hdev->reset_request);
4145         hclge_reset_task_schedule(hdev);
4146
4147         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4148                 hdev->reset_level++;
4149 }
4150
4151 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4152                                         enum hnae3_reset_type rst_type)
4153 {
4154         struct hclge_dev *hdev = ae_dev->priv;
4155
4156         set_bit(rst_type, &hdev->default_reset_request);
4157 }
4158
4159 static void hclge_reset_timer(struct timer_list *t)
4160 {
4161         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4162
4163         /* if default_reset_request has no value, it means that this reset
4164          * request has already been handled, so just return here
4165          */
4166         if (!hdev->default_reset_request)
4167                 return;
4168
4169         dev_info(&hdev->pdev->dev,
4170                  "triggering reset in reset timer\n");
4171         hclge_reset_event(hdev->pdev, NULL);
4172 }
4173
4174 static void hclge_reset_subtask(struct hclge_dev *hdev)
4175 {
4176         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4177
4178         /* Check whether there is any ongoing reset in the hardware; this
4179          * can be seen from reset_pending. If there is, we need to wait for
4180          * the hardware to complete the reset.
4181          *    a. If we can determine within a reasonable time that the
4182          *       hardware has fully reset, proceed with the driver and
4183          *       client reset.
4184          *    b. Otherwise, come back later to check the status again, so
4185          *       reschedule now.
4186          */
4187         hdev->last_reset_time = jiffies;
4188         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4189         if (hdev->reset_type != HNAE3_NONE_RESET)
4190                 hclge_reset(hdev);
4191
4192         /* check if we got any *new* reset requests to be honored */
4193         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4194         if (hdev->reset_type != HNAE3_NONE_RESET)
4195                 hclge_do_reset(hdev);
4196
4197         hdev->reset_type = HNAE3_NONE_RESET;
4198 }
4199
4200 static void hclge_reset_service_task(struct hclge_dev *hdev)
4201 {
4202         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4203                 return;
4204
4205         down(&hdev->reset_sem);
4206         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4207
4208         hclge_reset_subtask(hdev);
4209
4210         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4211         up(&hdev->reset_sem);
4212 }
4213
4214 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4215 {
4216         int i;
4217
4218         /* start from vport 1, since vport 0 (the PF) is always alive */
4219         for (i = 1; i < hdev->num_alloc_vport; i++) {
4220                 struct hclge_vport *vport = &hdev->vport[i];
4221
4222                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4223                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4224
4225                 /* If the VF is not alive, restore its MPS to the default value */
4226                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4227                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4228         }
4229 }
4230
4231 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4232 {
4233         unsigned long delta = round_jiffies_relative(HZ);
4234
4235         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4236                 return;
4237
4238         /* Always handle link status updating to make sure the link state
4239          * is refreshed when the update is triggered by a mailbox message.
4240          */
4241         hclge_update_link_status(hdev);
4242         hclge_sync_mac_table(hdev);
4243         hclge_sync_promisc_mode(hdev);
4244
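             /* Throttle the periodic work: if less than HZ has passed since
              * the last full pass, only reschedule for the remaining time
              * instead of running the heavy updates below again.
              */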
4245         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4246                 delta = jiffies - hdev->last_serv_processed;
4247
4248                 if (delta < round_jiffies_relative(HZ)) {
4249                         delta = round_jiffies_relative(HZ) - delta;
4250                         goto out;
4251                 }
4252         }
4253
4254         hdev->serv_processed_cnt++;
4255         hclge_update_vport_alive(hdev);
4256
4257         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4258                 hdev->last_serv_processed = jiffies;
4259                 goto out;
4260         }
4261
4262         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4263                 hclge_update_stats_for_all(hdev);
4264
4265         hclge_update_port_info(hdev);
4266         hclge_sync_vlan_filter(hdev);
4267
4268         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4269                 hclge_rfs_filter_expire(hdev);
4270
4271         hdev->last_serv_processed = jiffies;
4272
4273 out:
4274         hclge_task_schedule(hdev, delta);
4275 }
4276
4277 static void hclge_service_task(struct work_struct *work)
4278 {
4279         struct hclge_dev *hdev =
4280                 container_of(work, struct hclge_dev, service_task.work);
4281
4282         hclge_reset_service_task(hdev);
4283         hclge_mailbox_service_task(hdev);
4284         hclge_periodic_service_task(hdev);
4285
4286         /* Handle reset and mbx again in case periodical task delays the
4287          * handling by calling hclge_task_schedule() in
4288          * hclge_periodic_service_task().
4289          */
4290         hclge_reset_service_task(hdev);
4291         hclge_mailbox_service_task(hdev);
4292 }
4293
4294 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4295 {
4296         /* VF handle has no client */
4297         if (!handle->client)
4298                 return container_of(handle, struct hclge_vport, nic);
4299         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4300                 return container_of(handle, struct hclge_vport, roce);
4301         else
4302                 return container_of(handle, struct hclge_vport, nic);
4303 }
4304
4305 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4306                                   struct hnae3_vector_info *vector_info)
4307 {
4308 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4309
4310         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4311
4312         /* an extended offset is needed to configure vectors with index >= 64 */
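             /* Worked example (values other than the macro above are
              * illustrative): with HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 = 64,
              * idx = 66 falls into the extended region, giving
              * io_base + HCLGE_VECTOR_EXT_REG_BASE +
              * (65 / 64) * HCLGE_VECTOR_REG_OFFSET_H +
              * (65 % 64) * HCLGE_VECTOR_REG_OFFSET.
              */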
4313         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4314                 vector_info->io_addr = hdev->hw.io_base +
4315                                 HCLGE_VECTOR_REG_BASE +
4316                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4317         else
4318                 vector_info->io_addr = hdev->hw.io_base +
4319                                 HCLGE_VECTOR_EXT_REG_BASE +
4320                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4321                                 HCLGE_VECTOR_REG_OFFSET_H +
4322                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4323                                 HCLGE_VECTOR_REG_OFFSET;
4324
4325         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4326         hdev->vector_irq[idx] = vector_info->vector;
4327 }
4328
4329 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4330                             struct hnae3_vector_info *vector_info)
4331 {
4332         struct hclge_vport *vport = hclge_get_vport(handle);
4333         struct hnae3_vector_info *vector = vector_info;
4334         struct hclge_dev *hdev = vport->back;
4335         int alloc = 0;
4336         u16 i = 0;
4337         u16 j;
4338
4339         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4340         vector_num = min(hdev->num_msi_left, vector_num);
4341
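             /* scan for free slots starting at index 1; index 0 is never
              * handed out here (it is presumably already in use, e.g. for
              * the misc interrupt vector)
              */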
4342         for (j = 0; j < vector_num; j++) {
4343                 while (++i < hdev->num_nic_msi) {
4344                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4345                                 hclge_get_vector_info(hdev, i, vector);
4346                                 vector++;
4347                                 alloc++;
4348
4349                                 break;
4350                         }
4351                 }
4352         }
4353         hdev->num_msi_left -= alloc;
4354         hdev->num_msi_used += alloc;
4355
4356         return alloc;
4357 }
4358
4359 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4360 {
4361         int i;
4362
4363         for (i = 0; i < hdev->num_msi; i++)
4364                 if (vector == hdev->vector_irq[i])
4365                         return i;
4366
4367         return -EINVAL;
4368 }
4369
4370 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4371 {
4372         struct hclge_vport *vport = hclge_get_vport(handle);
4373         struct hclge_dev *hdev = vport->back;
4374         int vector_id;
4375
4376         vector_id = hclge_get_vector_index(hdev, vector);
4377         if (vector_id < 0) {
4378                 dev_err(&hdev->pdev->dev,
4379                         "Get vector index fail. vector = %d\n", vector);
4380                 return vector_id;
4381         }
4382
4383         hclge_free_vector(hdev, vector_id);
4384
4385         return 0;
4386 }
4387
4388 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4389 {
4390         return HCLGE_RSS_KEY_SIZE;
4391 }
4392
4393 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4394                                   const u8 hfunc, const u8 *key)
4395 {
4396         struct hclge_rss_config_cmd *req;
4397         unsigned int key_offset = 0;
4398         struct hclge_desc desc;
4399         int key_counts;
4400         int key_size;
4401         int ret;
4402
4403         key_counts = HCLGE_RSS_KEY_SIZE;
4404         req = (struct hclge_rss_config_cmd *)desc.data;
4405
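             /* the key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM
              * bytes, one command descriptor per chunk, with key_offset
              * selecting which chunk of the key each descriptor carries
              */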
4406         while (key_counts) {
4407                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4408                                            false);
4409
4410                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4411                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4412
4413                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4414                 memcpy(req->hash_key,
4415                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4416
4417                 key_counts -= key_size;
4418                 key_offset++;
4419                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4420                 if (ret) {
4421                         dev_err(&hdev->pdev->dev,
4422                                 "Configure RSS config fail, status = %d\n",
4423                                 ret);
4424                         return ret;
4425                 }
4426         }
4427         return 0;
4428 }
4429
4430 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4431 {
4432         struct hclge_rss_indirection_table_cmd *req;
4433         struct hclge_desc desc;
4434         int rss_cfg_tbl_num;
4435         u8 rss_msb_oft;
4436         u8 rss_msb_val;
4437         int ret;
4438         u16 qid;
4439         int i;
4440         u32 j;
4441
4442         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4443         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4444                           HCLGE_RSS_CFG_TBL_SIZE;
4445
4446         for (i = 0; i < rss_cfg_tbl_num; i++) {
4447                 hclge_cmd_setup_basic_desc
4448                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4449
4450                 req->start_table_index =
4451                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4452                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
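                     /* each queue id is split: the low 8 bits go into
                      * rss_qid_l[j], while the single bit at position
                      * HCLGE_RSS_CFG_TBL_BW_L is packed into the rss_qid_h
                      * bitmap
                      */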
4453                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4454                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4455                         req->rss_qid_l[j] = qid & 0xff;
4456                         rss_msb_oft =
4457                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4458                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4459                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4460                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4461                 }
4462                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4463                 if (ret) {
4464                         dev_err(&hdev->pdev->dev,
4465                                 "Configure rss indir table fail, status = %d\n",
4466                                 ret);
4467                         return ret;
4468                 }
4469         }
4470         return 0;
4471 }
4472
4473 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4474                                  u16 *tc_size, u16 *tc_offset)
4475 {
4476         struct hclge_rss_tc_mode_cmd *req;
4477         struct hclge_desc desc;
4478         int ret;
4479         int i;
4480
4481         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4482         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4483
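             /* each 16-bit mode word encodes a TC valid bit, the TC size
              * (log2 of the rounded-up rss size, with its MSB stored in a
              * separate bit) and the TC queue offset
              */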
4484         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4485                 u16 mode = 0;
4486
4487                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4488                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4489                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4490                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4491                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4492                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4493                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4494
4495                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4496         }
4497
4498         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4499         if (ret)
4500                 dev_err(&hdev->pdev->dev,
4501                         "Configure rss tc mode fail, status = %d\n", ret);
4502
4503         return ret;
4504 }
4505
4506 static void hclge_get_rss_type(struct hclge_vport *vport)
4507 {
4508         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4509             vport->rss_tuple_sets.ipv4_udp_en ||
4510             vport->rss_tuple_sets.ipv4_sctp_en ||
4511             vport->rss_tuple_sets.ipv6_tcp_en ||
4512             vport->rss_tuple_sets.ipv6_udp_en ||
4513             vport->rss_tuple_sets.ipv6_sctp_en)
4514                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4515         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4516                  vport->rss_tuple_sets.ipv6_fragment_en)
4517                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4518         else
4519                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4520 }
4521
4522 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4523 {
4524         struct hclge_rss_input_tuple_cmd *req;
4525         struct hclge_desc desc;
4526         int ret;
4527
4528         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4529
4530         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4531
4532         /* Get the tuple cfg from pf */
4533         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4534         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4535         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4536         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4537         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4538         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4539         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4540         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4541         hclge_get_rss_type(&hdev->vport[0]);
4542         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4543         if (ret)
4544                 dev_err(&hdev->pdev->dev,
4545                         "Configure rss input fail, status = %d\n", ret);
4546         return ret;
4547 }
4548
4549 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4550                          u8 *key, u8 *hfunc)
4551 {
4552         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4553         struct hclge_vport *vport = hclge_get_vport(handle);
4554         int i;
4555
4556         /* Get hash algorithm */
4557         if (hfunc) {
4558                 switch (vport->rss_algo) {
4559                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4560                         *hfunc = ETH_RSS_HASH_TOP;
4561                         break;
4562                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4563                         *hfunc = ETH_RSS_HASH_XOR;
4564                         break;
4565                 default:
4566                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4567                         break;
4568                 }
4569         }
4570
4571         /* Get the RSS Key required by the user */
4572         if (key)
4573                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4574
4575         /* Get indirection table */
4576         if (indir)
4577                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4578                         indir[i] =  vport->rss_indirection_tbl[i];
4579
4580         return 0;
4581 }
4582
4583 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4584                          const  u8 *key, const  u8 hfunc)
4585 {
4586         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4587         struct hclge_vport *vport = hclge_get_vport(handle);
4588         struct hclge_dev *hdev = vport->back;
4589         u8 hash_algo;
4590         int ret, i;
4591
4592         /* Set the RSS Hash Key if specified by the user */
4593         if (key) {
4594                 switch (hfunc) {
4595                 case ETH_RSS_HASH_TOP:
4596                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4597                         break;
4598                 case ETH_RSS_HASH_XOR:
4599                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4600                         break;
4601                 case ETH_RSS_HASH_NO_CHANGE:
4602                         hash_algo = vport->rss_algo;
4603                         break;
4604                 default:
4605                         return -EINVAL;
4606                 }
4607
4608                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4609                 if (ret)
4610                         return ret;
4611
4612                 /* Update the shadow RSS key with the user specified key */
4613                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4614                 vport->rss_algo = hash_algo;
4615         }
4616
4617         /* Update the shadow RSS table with user specified qids */
4618         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4619                 vport->rss_indirection_tbl[i] = indir[i];
4620
4621         /* Update the hardware */
4622         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4623 }
4624
4625 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4626 {
4627         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4628
4629         if (nfc->data & RXH_L4_B_2_3)
4630                 hash_sets |= HCLGE_D_PORT_BIT;
4631         else
4632                 hash_sets &= ~HCLGE_D_PORT_BIT;
4633
4634         if (nfc->data & RXH_IP_SRC)
4635                 hash_sets |= HCLGE_S_IP_BIT;
4636         else
4637                 hash_sets &= ~HCLGE_S_IP_BIT;
4638
4639         if (nfc->data & RXH_IP_DST)
4640                 hash_sets |= HCLGE_D_IP_BIT;
4641         else
4642                 hash_sets &= ~HCLGE_D_IP_BIT;
4643
4644         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4645                 hash_sets |= HCLGE_V_TAG_BIT;
4646
4647         return hash_sets;
4648 }
4649
4650 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4651                                     struct ethtool_rxnfc *nfc,
4652                                     struct hclge_rss_input_tuple_cmd *req)
4653 {
4654         struct hclge_dev *hdev = vport->back;
4655         u8 tuple_sets;
4656
4657         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4658         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4659         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4660         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4661         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4662         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4663         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4664         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4665
4666         tuple_sets = hclge_get_rss_hash_bits(nfc);
4667         switch (nfc->flow_type) {
4668         case TCP_V4_FLOW:
4669                 req->ipv4_tcp_en = tuple_sets;
4670                 break;
4671         case TCP_V6_FLOW:
4672                 req->ipv6_tcp_en = tuple_sets;
4673                 break;
4674         case UDP_V4_FLOW:
4675                 req->ipv4_udp_en = tuple_sets;
4676                 break;
4677         case UDP_V6_FLOW:
4678                 req->ipv6_udp_en = tuple_sets;
4679                 break;
4680         case SCTP_V4_FLOW:
4681                 req->ipv4_sctp_en = tuple_sets;
4682                 break;
4683         case SCTP_V6_FLOW:
4684                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4685                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4686                         return -EINVAL;
4687
4688                 req->ipv6_sctp_en = tuple_sets;
4689                 break;
4690         case IPV4_FLOW:
4691                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4692                 break;
4693         case IPV6_FLOW:
4694                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4695                 break;
4696         default:
4697                 return -EINVAL;
4698         }
4699
4700         return 0;
4701 }
4702
4703 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4704                                struct ethtool_rxnfc *nfc)
4705 {
4706         struct hclge_vport *vport = hclge_get_vport(handle);
4707         struct hclge_dev *hdev = vport->back;
4708         struct hclge_rss_input_tuple_cmd *req;
4709         struct hclge_desc desc;
4710         int ret;
4711
4712         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4713                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4714                 return -EINVAL;
4715
4716         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4717         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4718
4719         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4720         if (ret) {
4721                 dev_err(&hdev->pdev->dev,
4722                         "failed to init rss tuple cmd, ret = %d\n", ret);
4723                 return ret;
4724         }
4725
4726         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4727         if (ret) {
4728                 dev_err(&hdev->pdev->dev,
4729                         "Set rss tuple fail, status = %d\n", ret);
4730                 return ret;
4731         }
4732
4733         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4734         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4735         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4736         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4737         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4738         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4739         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4740         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4741         hclge_get_rss_type(vport);
4742         return 0;
4743 }
4744
4745 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4746                                      u8 *tuple_sets)
4747 {
4748         switch (flow_type) {
4749         case TCP_V4_FLOW:
4750                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4751                 break;
4752         case UDP_V4_FLOW:
4753                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4754                 break;
4755         case TCP_V6_FLOW:
4756                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4757                 break;
4758         case UDP_V6_FLOW:
4759                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4760                 break;
4761         case SCTP_V4_FLOW:
4762                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4763                 break;
4764         case SCTP_V6_FLOW:
4765                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4766                 break;
4767         case IPV4_FLOW:
4768         case IPV6_FLOW:
4769                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4770                 break;
4771         default:
4772                 return -EINVAL;
4773         }
4774
4775         return 0;
4776 }
4777
4778 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4779 {
4780         u64 tuple_data = 0;
4781
4782         if (tuple_sets & HCLGE_D_PORT_BIT)
4783                 tuple_data |= RXH_L4_B_2_3;
4784         if (tuple_sets & HCLGE_S_PORT_BIT)
4785                 tuple_data |= RXH_L4_B_0_1;
4786         if (tuple_sets & HCLGE_D_IP_BIT)
4787                 tuple_data |= RXH_IP_DST;
4788         if (tuple_sets & HCLGE_S_IP_BIT)
4789                 tuple_data |= RXH_IP_SRC;
4790
4791         return tuple_data;
4792 }
4793
4794 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4795                                struct ethtool_rxnfc *nfc)
4796 {
4797         struct hclge_vport *vport = hclge_get_vport(handle);
4798         u8 tuple_sets;
4799         int ret;
4800
4801         nfc->data = 0;
4802
4803         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4804         if (ret || !tuple_sets)
4805                 return ret;
4806
4807         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4808
4809         return 0;
4810 }
4811
4812 static int hclge_get_tc_size(struct hnae3_handle *handle)
4813 {
4814         struct hclge_vport *vport = hclge_get_vport(handle);
4815         struct hclge_dev *hdev = vport->back;
4816
4817         return hdev->pf_rss_size_max;
4818 }
4819
4820 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4821 {
4822         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4823         struct hclge_vport *vport = hdev->vport;
4824         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4825         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4826         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4827         struct hnae3_tc_info *tc_info;
4828         u16 roundup_size;
4829         u16 rss_size;
4830         int i;
4831
4832         tc_info = &vport->nic.kinfo.tc_info;
4833         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4834                 rss_size = tc_info->tqp_count[i];
4835                 tc_valid[i] = 0;
4836
4837                 if (!(hdev->hw_tc_map & BIT(i)))
4838                         continue;
4839
4840                 /* The tc_size written to hardware is the log2 of rss_size
4841                  * rounded up to a power of two; the actual queue size is
4842                  * limited by the indirection table.
4843                  */
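                     /* e.g. (illustrative) rss_size = 10 rounds up to 16,
                      * so the tc_size written to hardware is ilog2(16) = 4
                      */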
4844                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4845                     rss_size == 0) {
4846                         dev_err(&hdev->pdev->dev,
4847                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4848                                 rss_size);
4849                         return -EINVAL;
4850                 }
4851
4852                 roundup_size = roundup_pow_of_two(rss_size);
4853                 roundup_size = ilog2(roundup_size);
4854
4855                 tc_valid[i] = 1;
4856                 tc_size[i] = roundup_size;
4857                 tc_offset[i] = tc_info->tqp_offset[i];
4858         }
4859
4860         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4861 }
4862
4863 int hclge_rss_init_hw(struct hclge_dev *hdev)
4864 {
4865         struct hclge_vport *vport = hdev->vport;
4866         u16 *rss_indir = vport[0].rss_indirection_tbl;
4867         u8 *key = vport[0].rss_hash_key;
4868         u8 hfunc = vport[0].rss_algo;
4869         int ret;
4870
4871         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4872         if (ret)
4873                 return ret;
4874
4875         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4876         if (ret)
4877                 return ret;
4878
4879         ret = hclge_set_rss_input_tuple(hdev);
4880         if (ret)
4881                 return ret;
4882
4883         return hclge_init_rss_tc_mode(hdev);
4884 }
4885
4886 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4887 {
4888         struct hclge_vport *vport = hdev->vport;
4889         int i, j;
4890
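             /* spread the allocated RSS queues over the indirection table
              * round-robin: entry i maps to queue i % alloc_rss_size
              */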
4891         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4892                 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4893                         vport[j].rss_indirection_tbl[i] =
4894                                 i % vport[j].alloc_rss_size;
4895         }
4896 }
4897
4898 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4899 {
4900         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4901         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4902         struct hclge_vport *vport = hdev->vport;
4903
4904         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4905                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4906
4907         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4908                 u16 *rss_ind_tbl;
4909
4910                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4911                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4912                 vport[i].rss_tuple_sets.ipv4_udp_en =
4913                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4914                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4915                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4916                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4917                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4918                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4919                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4920                 vport[i].rss_tuple_sets.ipv6_udp_en =
4921                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4922                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4923                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4924                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4925                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4926                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4927                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4928
4929                 vport[i].rss_algo = rss_algo;
4930
4931                 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4932                                            sizeof(*rss_ind_tbl), GFP_KERNEL);
4933                 if (!rss_ind_tbl)
4934                         return -ENOMEM;
4935
4936                 vport[i].rss_indirection_tbl = rss_ind_tbl;
4937                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4938                        HCLGE_RSS_KEY_SIZE);
4939         }
4940
4941         hclge_rss_indir_init_cfg(hdev);
4942
4943         return 0;
4944 }
4945
4946 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4947                                 int vector_id, bool en,
4948                                 struct hnae3_ring_chain_node *ring_chain)
4949 {
4950         struct hclge_dev *hdev = vport->back;
4951         struct hnae3_ring_chain_node *node;
4952         struct hclge_desc desc;
4953         struct hclge_ctrl_vector_chain_cmd *req =
4954                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4955         enum hclge_cmd_status status;
4956         enum hclge_opcode_type op;
4957         u16 tqp_type_and_id;
4958         int i;
4959
4960         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4961         hclge_cmd_setup_basic_desc(&desc, op, false);
4962         req->int_vector_id_l = hnae3_get_field(vector_id,
4963                                                HCLGE_VECTOR_ID_L_M,
4964                                                HCLGE_VECTOR_ID_L_S);
4965         req->int_vector_id_h = hnae3_get_field(vector_id,
4966                                                HCLGE_VECTOR_ID_H_M,
4967                                                HCLGE_VECTOR_ID_H_S);
4968
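             /* rings are mapped in batches of HCLGE_VECTOR_ELEMENTS_PER_CMD
              * per command: a full batch is sent immediately and the
              * descriptor re-initialised, while any remainder is flushed
              * after the loop
              */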
4969         i = 0;
4970         for (node = ring_chain; node; node = node->next) {
4971                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4972                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4973                                 HCLGE_INT_TYPE_S,
4974                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4975                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4976                                 HCLGE_TQP_ID_S, node->tqp_index);
4977                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4978                                 HCLGE_INT_GL_IDX_S,
4979                                 hnae3_get_field(node->int_gl_idx,
4980                                                 HNAE3_RING_GL_IDX_M,
4981                                                 HNAE3_RING_GL_IDX_S));
4982                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4983                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4984                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4985                         req->vfid = vport->vport_id;
4986
4987                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4988                         if (status) {
4989                                 dev_err(&hdev->pdev->dev,
4990                                         "Map TQP fail, status is %d.\n",
4991                                         status);
4992                                 return -EIO;
4993                         }
4994                         i = 0;
4995
4996                         hclge_cmd_setup_basic_desc(&desc, op, false);
4999                         req->int_vector_id_l =
5000                                 hnae3_get_field(vector_id,
5001                                                 HCLGE_VECTOR_ID_L_M,
5002                                                 HCLGE_VECTOR_ID_L_S);
5003                         req->int_vector_id_h =
5004                                 hnae3_get_field(vector_id,
5005                                                 HCLGE_VECTOR_ID_H_M,
5006                                                 HCLGE_VECTOR_ID_H_S);
5007                 }
5008         }
5009
5010         if (i > 0) {
5011                 req->int_cause_num = i;
5012                 req->vfid = vport->vport_id;
5013                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5014                 if (status) {
5015                         dev_err(&hdev->pdev->dev,
5016                                 "Map TQP fail, status is %d.\n", status);
5017                         return -EIO;
5018                 }
5019         }
5020
5021         return 0;
5022 }
5023
5024 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5025                                     struct hnae3_ring_chain_node *ring_chain)
5026 {
5027         struct hclge_vport *vport = hclge_get_vport(handle);
5028         struct hclge_dev *hdev = vport->back;
5029         int vector_id;
5030
5031         vector_id = hclge_get_vector_index(hdev, vector);
5032         if (vector_id < 0) {
5033                 dev_err(&hdev->pdev->dev,
5034                         "failed to get vector index. vector=%d\n", vector);
5035                 return vector_id;
5036         }
5037
5038         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5039 }
5040
5041 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5042                                        struct hnae3_ring_chain_node *ring_chain)
5043 {
5044         struct hclge_vport *vport = hclge_get_vport(handle);
5045         struct hclge_dev *hdev = vport->back;
5046         int vector_id, ret;
5047
5048         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5049                 return 0;
5050
5051         vector_id = hclge_get_vector_index(hdev, vector);
5052         if (vector_id < 0) {
5053                 dev_err(&handle->pdev->dev,
5054                         "Get vector index fail. ret =%d\n", vector_id);
5055                 return vector_id;
5056         }
5057
5058         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5059         if (ret)
5060                 dev_err(&handle->pdev->dev,
5061                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5062                         vector_id, ret);
5063
5064         return ret;
5065 }
5066
5067 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5068                                       bool en_uc, bool en_mc, bool en_bc)
5069 {
5070         struct hclge_vport *vport = &hdev->vport[vf_id];
5071         struct hnae3_handle *handle = &vport->nic;
5072         struct hclge_promisc_cfg_cmd *req;
5073         struct hclge_desc desc;
5074         bool uc_tx_en = en_uc;
5075         u8 promisc_cfg = 0;
5076         int ret;
5077
5078         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5079
5080         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5081         req->vf_id = vf_id;
5082
5083         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5084                 uc_tx_en = false;
5085
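             /* the extended config carries separate RX and TX enables per
              * packet type; unicast TX may be suppressed above by the
              * limit-promisc private flag
              */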
5086         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5087         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5088         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5089         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5090         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5091         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5092         req->extend_promisc = promisc_cfg;
5093
5094         /* to be compatible with DEVICE_VERSION_V1/2 */
5095         promisc_cfg = 0;
5096         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5097         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5098         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5099         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5100         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5101         req->promisc = promisc_cfg;
5102
5103         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5104         if (ret)
5105                 dev_err(&hdev->pdev->dev,
5106                         "failed to set vport %u promisc mode, ret = %d.\n",
5107                         vf_id, ret);
5108
5109         return ret;
5110 }
5111
5112 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5113                                  bool en_mc_pmc, bool en_bc_pmc)
5114 {
5115         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5116                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5117 }
5118
5119 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5120                                   bool en_mc_pmc)
5121 {
5122         struct hclge_vport *vport = hclge_get_vport(handle);
5123         struct hclge_dev *hdev = vport->back;
5124         bool en_bc_pmc = true;
5125
5126         /* For devices below version V2, the vlan filter is always bypassed
5127          * when broadcast promiscuous is enabled, so broadcast promiscuous
5128          * should stay disabled until the user enables promiscuous mode.
5129          */
5130         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5131                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5132
5133         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5134                                             en_bc_pmc);
5135 }
5136
5137 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5138 {
5139         struct hclge_vport *vport = hclge_get_vport(handle);
5140         struct hclge_dev *hdev = vport->back;
5141
5142         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5143 }
5144
5145 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5146 {
5147         struct hclge_get_fd_mode_cmd *req;
5148         struct hclge_desc desc;
5149         int ret;
5150
5151         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5152
5153         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5154
5155         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5156         if (ret) {
5157                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5158                 return ret;
5159         }
5160
5161         *fd_mode = req->mode;
5162
5163         return ret;
5164 }
5165
5166 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5167                                    u32 *stage1_entry_num,
5168                                    u32 *stage2_entry_num,
5169                                    u16 *stage1_counter_num,
5170                                    u16 *stage2_counter_num)
5171 {
5172         struct hclge_get_fd_allocation_cmd *req;
5173         struct hclge_desc desc;
5174         int ret;
5175
5176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5177
5178         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5179
5180         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5181         if (ret) {
5182                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5183                         ret);
5184                 return ret;
5185         }
5186
5187         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5188         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5189         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5190         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5191
5192         return ret;
5193 }
5194
5195 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5196                                    enum HCLGE_FD_STAGE stage_num)
5197 {
5198         struct hclge_set_fd_key_config_cmd *req;
5199         struct hclge_fd_key_cfg *stage;
5200         struct hclge_desc desc;
5201         int ret;
5202
5203         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5204
5205         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5206         stage = &hdev->fd_cfg.key_cfg[stage_num];
5207         req->stage = stage_num;
5208         req->key_select = stage->key_sel;
5209         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5210         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5211         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5212         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5213         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5214         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5215
5216         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5217         if (ret)
5218                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5219
5220         return ret;
5221 }
5222
5223 static int hclge_init_fd_config(struct hclge_dev *hdev)
5224 {
5225 #define LOW_2_WORDS             0x03
5226         struct hclge_fd_key_cfg *key_cfg;
5227         int ret;
5228
5229         if (!hnae3_dev_fd_supported(hdev))
5230                 return 0;
5231
5232         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5233         if (ret)
5234                 return ret;
5235
5236         switch (hdev->fd_cfg.fd_mode) {
5237         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5238                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5239                 break;
5240         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5241                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5242                 break;
5243         default:
5244                 dev_err(&hdev->pdev->dev,
5245                         "Unsupported flow director mode %u\n",
5246                         hdev->fd_cfg.fd_mode);
5247                 return -EOPNOTSUPP;
5248         }
5249
5250         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5251         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5252         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5253         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5254         key_cfg->outer_sipv6_word_en = 0;
5255         key_cfg->outer_dipv6_word_en = 0;
5256
5257         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5258                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5259                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5260                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5261
5262         /* If the max 400-bit key is used, L2 tuples (source/destination MAC) can also be supported */
5263         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5264                 key_cfg->tuple_active |=
5265                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5266
5267         /* roce_type is used to filter RoCE frames;
5268          * dst_vport is used to restrict the rule to a specific vport
5269          */
5270         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5271
5272         ret = hclge_get_fd_allocation(hdev,
5273                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5274                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5275                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5276                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5277         if (ret)
5278                 return ret;
5279
5280         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5281 }
5282
5283 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5284                                 int loc, u8 *key, bool is_add)
5285 {
5286         struct hclge_fd_tcam_config_1_cmd *req1;
5287         struct hclge_fd_tcam_config_2_cmd *req2;
5288         struct hclge_fd_tcam_config_3_cmd *req3;
5289         struct hclge_desc desc[3];
5290         int ret;
5291
5292         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5293         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5294         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5295         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5296         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5297
5298         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5299         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5300         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5301
5302         req1->stage = stage;
5303         req1->xy_sel = sel_x ? 1 : 0;
5304         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5305         req1->index = cpu_to_le32(loc);
5306         req1->entry_vld = sel_x ? is_add : 0;
5307
5308         if (key) {
5309                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5310                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5311                        sizeof(req2->tcam_data));
5312                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5313                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5314         }
5315
5316         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5317         if (ret)
5318                 dev_err(&hdev->pdev->dev,
5319                         "config tcam key fail, ret=%d\n",
5320                         ret);
5321
5322         return ret;
5323 }
5324
5325 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5326                               struct hclge_fd_ad_data *action)
5327 {
5328         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5329         struct hclge_fd_ad_config_cmd *req;
5330         struct hclge_desc desc;
5331         u64 ad_data = 0;
5332         int ret;
5333
5334         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5335
5336         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5337         req->index = cpu_to_le32(loc);
5338         req->stage = stage;
5339
5340         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5341                       action->write_rule_id_to_bd);
5342         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5343                         action->rule_id);
5344         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5345                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5346                               action->override_tc);
5347                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5348                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5349         }
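             /* the fields set so far land in the upper 32 bits of the
              * 64-bit action word after the shift below; the queue, drop
              * and counter fields then occupy the lower 32 bits
              */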
5350         ad_data <<= 32;
5351         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5352         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5353                       action->forward_to_direct_queue);
5354         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5355                         action->queue_id);
5356         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5357         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5358                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5359         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5360         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5361                         action->counter_id);
5362
5363         req->ad_data = cpu_to_le64(ad_data);
5364         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5365         if (ret)
5366                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5367
5368         return ret;
5369 }
5370
5371 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5372                                    struct hclge_fd_rule *rule)
5373 {
5374         u16 tmp_x_s, tmp_y_s;
5375         u32 tmp_x_l, tmp_y_l;
5376         int i;
5377
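             /* calc_x()/calc_y() build the x/y TCAM pattern of a tuple from
              * its value and mask, so masked-off bits match any value
              */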
5378         if (rule->unused_tuple & tuple_bit)
5379                 return true;
5380
5381         switch (tuple_bit) {
5382         case BIT(INNER_DST_MAC):
5383                 for (i = 0; i < ETH_ALEN; i++) {
5384                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5385                                rule->tuples_mask.dst_mac[i]);
5386                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5387                                rule->tuples_mask.dst_mac[i]);
5388                 }
5389
5390                 return true;
5391         case BIT(INNER_SRC_MAC):
5392                 for (i = 0; i < ETH_ALEN; i++) {
5393                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5394                                rule->tuples_mask.src_mac[i]);
5395                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5396                                rule->tuples_mask.src_mac[i]);
5397                 }
5398
5399                 return true;
5400         case BIT(INNER_VLAN_TAG_FST):
5401                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5402                        rule->tuples_mask.vlan_tag1);
5403                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5404                        rule->tuples_mask.vlan_tag1);
5405                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5406                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5407
5408                 return true;
5409         case BIT(INNER_ETH_TYPE):
5410                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5411                        rule->tuples_mask.ether_proto);
5412                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5413                        rule->tuples_mask.ether_proto);
5414                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5415                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5416
5417                 return true;
5418         case BIT(INNER_IP_TOS):
5419                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5420                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5421
5422                 return true;
5423         case BIT(INNER_IP_PROTO):
5424                 calc_x(*key_x, rule->tuples.ip_proto,
5425                        rule->tuples_mask.ip_proto);
5426                 calc_y(*key_y, rule->tuples.ip_proto,
5427                        rule->tuples_mask.ip_proto);
5428
5429                 return true;
5430         case BIT(INNER_SRC_IP):
5431                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5432                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5433                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5434                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5435                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5436                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5437
5438                 return true;
5439         case BIT(INNER_DST_IP):
5440                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5441                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5442                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5443                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5444                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5445                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5446
5447                 return true;
5448         case BIT(INNER_SRC_PORT):
5449                 calc_x(tmp_x_s, rule->tuples.src_port,
5450                        rule->tuples_mask.src_port);
5451                 calc_y(tmp_y_s, rule->tuples.src_port,
5452                        rule->tuples_mask.src_port);
5453                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5454                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5455
5456                 return true;
5457         case BIT(INNER_DST_PORT):
5458                 calc_x(tmp_x_s, rule->tuples.dst_port,
5459                        rule->tuples_mask.dst_port);
5460                 calc_y(tmp_y_s, rule->tuples.dst_port,
5461                        rule->tuples_mask.dst_port);
5462                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5463                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5464
5465                 return true;
5466         default:
5467                 return false;
5468         }
5469 }
5470
5471 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5472                                  u8 vf_id, u8 network_port_id)
5473 {
5474         u32 port_number = 0;
5475
5476         if (port_type == HOST_PORT) {
5477                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5478                                 pf_id);
5479                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5480                                 vf_id);
5481                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5482         } else {
5483                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5484                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5485                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5486         }
5487
5488         return port_number;
5489 }
5490
5491 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5492                                        __le32 *key_x, __le32 *key_y,
5493                                        struct hclge_fd_rule *rule)
5494 {
5495         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5496         u8 cur_pos = 0, tuple_size, shift_bits;
5497         unsigned int i;
5498
5499         for (i = 0; i < MAX_META_DATA; i++) {
5500                 tuple_size = meta_data_key_info[i].key_length;
5501                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5502
5503                 switch (tuple_bit) {
5504                 case BIT(ROCE_TYPE):
5505                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5506                         cur_pos += tuple_size;
5507                         break;
5508                 case BIT(DST_VPORT):
5509                         port_number = hclge_get_port_number(HOST_PORT, 0,
5510                                                             rule->vf_id, 0);
5511                         hnae3_set_field(meta_data,
5512                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
5513                                         cur_pos, port_number);
5514                         cur_pos += tuple_size;
5515                         break;
5516                 default:
5517                         break;
5518                 }
5519         }
5520
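             /* meta_data has been packed from bit 0 upward; shifting it left by
              * (32 - cur_pos) re-aligns the used bits against the MSB of the
              * 32-bit meta data word. Illustrative example: with cur_pos == 12,
              * the 12 packed bits end up in bits 31..20 of key_x/key_y.
              */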
5521         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5522         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5523         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5524
5525         *key_x = cpu_to_le32(tmp_x << shift_bits);
5526         *key_y = cpu_to_le32(tmp_y << shift_bits);
5527 }
5528
5529 /* A complete key consists of a meta data key and a tuple key.
5530  * The meta data key is stored in the MSB region, the tuple key in the
5531  * LSB region, and any unused bits are filled with 0.
5532  */
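     /* Sketch of the resulting key buffer (widths below are illustrative only):
      *
      *   byte 0               n        meta_data_region     max_key_length / 8
      *   +---------------------+--------+---------------------+
      *   |  valid tuple keys   | zeroes |    meta data key    |
      *   +---------------------+--------+---------------------+
      */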
5533 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5534                             struct hclge_fd_rule *rule)
5535 {
5536         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5537         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5538         u8 *cur_key_x, *cur_key_y;
5539         u8 meta_data_region;
5540         u8 tuple_size;
5541         int ret;
5542         u32 i;
5543
5544         memset(key_x, 0, sizeof(key_x));
5545         memset(key_y, 0, sizeof(key_y));
5546         cur_key_x = key_x;
5547         cur_key_y = key_y;
5548
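             /* Walk every possible tuple; only tuples that are enabled in
              * tuple_active and actually converted by hclge_fd_convert_tuple()
              * consume space, so the valid tuples end up packed back to back
              * from the start of key_x/key_y.
              */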
5549         for (i = 0; i < MAX_TUPLE; i++) {
5550                 bool tuple_valid;
5551                 u32 check_tuple;
5552
5553                 tuple_size = tuple_key_info[i].key_length / 8;
5554                 check_tuple = key_cfg->tuple_active & BIT(i);
5555
5556                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5557                                                      cur_key_y, rule);
5558                 if (tuple_valid) {
5559                         cur_key_x += tuple_size;
5560                         cur_key_y += tuple_size;
5561                 }
5562         }
5563
5564         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5565                         MAX_META_DATA_LENGTH / 8;
5566
5567         hclge_fd_convert_meta_data(key_cfg,
5568                                    (__le32 *)(key_x + meta_data_region),
5569                                    (__le32 *)(key_y + meta_data_region),
5570                                    rule);
5571
5572         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5573                                    true);
5574         if (ret) {
5575                 dev_err(&hdev->pdev->dev,
5576                         "fd key_y config fail, loc=%u, ret=%d\n",
5577                         rule->location, ret);
5578                 return ret;
5579         }
5580
5581         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5582                                    true);
5583         if (ret)
5584                 dev_err(&hdev->pdev->dev,
5585                         "fd key_x config fail, loc=%u, ret=%d\n",
5586                         rule->location, ret);
5587         return ret;
5588 }
5589
5590 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5591                                struct hclge_fd_rule *rule)
5592 {
5593         struct hclge_vport *vport = hdev->vport;
5594         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5595         struct hclge_fd_ad_data ad_data;
5596
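             /* A rule resolves to one of three actions: drop the packet,
              * override the TC (cls_flower rules), or forward to a specific
              * queue (ethtool/aRFS rules).
              */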
5597         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5598         ad_data.ad_id = rule->location;
5599
5600         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5601                 ad_data.drop_packet = true;
5602         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5603                 ad_data.override_tc = true;
5604                 ad_data.queue_id =
5605                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5606                 ad_data.tc_size =
5607                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5608         } else {
5609                 ad_data.forward_to_direct_queue = true;
5610                 ad_data.queue_id = rule->queue_id;
5611         }
5612
5613         ad_data.use_counter = false;
5614         ad_data.counter_id = 0;
5615
5616         ad_data.use_next_stage = false;
5617         ad_data.next_input_key = 0;
5618
5619         ad_data.write_rule_id_to_bd = true;
5620         ad_data.rule_id = rule->location;
5621
5622         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5623 }
5624
5625 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5626                                        u32 *unused_tuple)
5627 {
5628         if (!spec || !unused_tuple)
5629                 return -EINVAL;
5630
5631         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5632
5633         if (!spec->ip4src)
5634                 *unused_tuple |= BIT(INNER_SRC_IP);
5635
5636         if (!spec->ip4dst)
5637                 *unused_tuple |= BIT(INNER_DST_IP);
5638
5639         if (!spec->psrc)
5640                 *unused_tuple |= BIT(INNER_SRC_PORT);
5641
5642         if (!spec->pdst)
5643                 *unused_tuple |= BIT(INNER_DST_PORT);
5644
5645         if (!spec->tos)
5646                 *unused_tuple |= BIT(INNER_IP_TOS);
5647
5648         return 0;
5649 }
5650
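     /* Illustrative example (command not taken from this driver): a rule such as
      *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3
      * leaves ip4dst, psrc and tos as zero, so the helper above marks
      * INNER_DST_IP, INNER_SRC_PORT and INNER_IP_TOS as unused tuples.
      */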
5651 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5652                                     u32 *unused_tuple)
5653 {
5654         if (!spec || !unused_tuple)
5655                 return -EINVAL;
5656
5657         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5658                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5659
5660         if (!spec->ip4src)
5661                 *unused_tuple |= BIT(INNER_SRC_IP);
5662
5663         if (!spec->ip4dst)
5664                 *unused_tuple |= BIT(INNER_DST_IP);
5665
5666         if (!spec->tos)
5667                 *unused_tuple |= BIT(INNER_IP_TOS);
5668
5669         if (!spec->proto)
5670                 *unused_tuple |= BIT(INNER_IP_PROTO);
5671
5672         if (spec->l4_4_bytes)
5673                 return -EOPNOTSUPP;
5674
5675         if (spec->ip_ver != ETH_RX_NFC_IP4)
5676                 return -EOPNOTSUPP;
5677
5678         return 0;
5679 }
5680
5681 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5682                                        u32 *unused_tuple)
5683 {
5684         if (!spec || !unused_tuple)
5685                 return -EINVAL;
5686
5687         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5688                 BIT(INNER_IP_TOS);
5689
5690         /* check whether the src/dst ip address is used */
5691         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5692                 *unused_tuple |= BIT(INNER_SRC_IP);
5693
5694         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5695                 *unused_tuple |= BIT(INNER_DST_IP);
5696
5697         if (!spec->psrc)
5698                 *unused_tuple |= BIT(INNER_SRC_PORT);
5699
5700         if (!spec->pdst)
5701                 *unused_tuple |= BIT(INNER_DST_PORT);
5702
5703         if (spec->tclass)
5704                 return -EOPNOTSUPP;
5705
5706         return 0;
5707 }
5708
5709 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5710                                     u32 *unused_tuple)
5711 {
5712         if (!spec || !unused_tuple)
5713                 return -EINVAL;
5714
5715         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5716                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5717
5718         /* check whether the src/dst ip address is used */
5719         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5720                 *unused_tuple |= BIT(INNER_SRC_IP);
5721
5722         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5723                 *unused_tuple |= BIT(INNER_DST_IP);
5724
5725         if (!spec->l4_proto)
5726                 *unused_tuple |= BIT(INNER_IP_PROTO);
5727
5728         if (spec->tclass)
5729                 return -EOPNOTSUPP;
5730
5731         if (spec->l4_4_bytes)
5732                 return -EOPNOTSUPP;
5733
5734         return 0;
5735 }
5736
5737 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5738 {
5739         if (!spec || !unused_tuple)
5740                 return -EINVAL;
5741
5742         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5743                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5744                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5745
5746         if (is_zero_ether_addr(spec->h_source))
5747                 *unused_tuple |= BIT(INNER_SRC_MAC);
5748
5749         if (is_zero_ether_addr(spec->h_dest))
5750                 *unused_tuple |= BIT(INNER_DST_MAC);
5751
5752         if (!spec->h_proto)
5753                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5754
5755         return 0;
5756 }
5757
5758 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5759                                     struct ethtool_rx_flow_spec *fs,
5760                                     u32 *unused_tuple)
5761 {
5762         if (fs->flow_type & FLOW_EXT) {
5763                 if (fs->h_ext.vlan_etype) {
5764                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5765                         return -EOPNOTSUPP;
5766                 }
5767
5768                 if (!fs->h_ext.vlan_tci)
5769                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5770
5771                 if (fs->m_ext.vlan_tci &&
5772                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5773                         dev_err(&hdev->pdev->dev,
5774                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5775                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5776                         return -EINVAL;
5777                 }
5778         } else {
5779                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5780         }
5781
5782         if (fs->flow_type & FLOW_MAC_EXT) {
5783                 if (hdev->fd_cfg.fd_mode !=
5784                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5785                         dev_err(&hdev->pdev->dev,
5786                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5787                         return -EOPNOTSUPP;
5788                 }
5789
5790                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5791                         *unused_tuple |= BIT(INNER_DST_MAC);
5792                 else
5793                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5794         }
5795
5796         return 0;
5797 }
5798
5799 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5800                                struct ethtool_rx_flow_spec *fs,
5801                                u32 *unused_tuple)
5802 {
5803         u32 flow_type;
5804         int ret;
5805
5806         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5807                 dev_err(&hdev->pdev->dev,
5808                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5809                         fs->location,
5810                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5811                 return -EINVAL;
5812         }
5813
5814         if ((fs->flow_type & FLOW_EXT) &&
5815             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5816                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5817                 return -EOPNOTSUPP;
5818         }
5819
5820         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5821         switch (flow_type) {
5822         case SCTP_V4_FLOW:
5823         case TCP_V4_FLOW:
5824         case UDP_V4_FLOW:
5825                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5826                                                   unused_tuple);
5827                 break;
5828         case IP_USER_FLOW:
5829                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5830                                                unused_tuple);
5831                 break;
5832         case SCTP_V6_FLOW:
5833         case TCP_V6_FLOW:
5834         case UDP_V6_FLOW:
5835                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5836                                                   unused_tuple);
5837                 break;
5838         case IPV6_USER_FLOW:
5839                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5840                                                unused_tuple);
5841                 break;
5842         case ETHER_FLOW:
5843                 if (hdev->fd_cfg.fd_mode !=
5844                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5845                         dev_err(&hdev->pdev->dev,
5846                                 "ETHER_FLOW is not supported in current fd mode!\n");
5847                         return -EOPNOTSUPP;
5848                 }
5849
5850                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5851                                                  unused_tuple);
5852                 break;
5853         default:
5854                 dev_err(&hdev->pdev->dev,
5855                         "unsupported protocol type, protocol type = %#x\n",
5856                         flow_type);
5857                 return -EOPNOTSUPP;
5858         }
5859
5860         if (ret) {
5861                 dev_err(&hdev->pdev->dev,
5862                         "failed to check flow union tuple, ret = %d\n",
5863                         ret);
5864                 return ret;
5865         }
5866
5867         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5868 }
5869
5870 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5871 {
5872         struct hclge_fd_rule *rule = NULL;
5873         struct hlist_node *node2;
5874
5875         spin_lock_bh(&hdev->fd_rule_lock);
5876         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5877                 if (rule->location >= location)
5878                         break;
5879         }
5880
5881         spin_unlock_bh(&hdev->fd_rule_lock);
5882
5883         return rule && rule->location == location;
5884 }
5885
5886 /* the caller must hold hdev->fd_rule_lock */
5887 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5888                                      struct hclge_fd_rule *new_rule,
5889                                      u16 location,
5890                                      bool is_add)
5891 {
5892         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5893         struct hlist_node *node2;
5894
5895         if (is_add && !new_rule)
5896                 return -EINVAL;
5897
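             /* fd_rule_list is kept sorted by location in ascending order; the
              * walk below finds either an existing entry at this location or
              * the node to insert the new rule behind (parent).
              */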
5898         hlist_for_each_entry_safe(rule, node2,
5899                                   &hdev->fd_rule_list, rule_node) {
5900                 if (rule->location >= location)
5901                         break;
5902                 parent = rule;
5903         }
5904
5905         if (rule && rule->location == location) {
5906                 hlist_del(&rule->rule_node);
5907                 kfree(rule);
5908                 hdev->hclge_fd_rule_num--;
5909
5910                 if (!is_add) {
5911                         if (!hdev->hclge_fd_rule_num)
5912                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5913                         clear_bit(location, hdev->fd_bmap);
5914
5915                         return 0;
5916                 }
5917         } else if (!is_add) {
5918                 dev_err(&hdev->pdev->dev,
5919                         "failed to delete, rule %u does not exist\n",
5920                         location);
5921                 return -EINVAL;
5922         }
5923
5924         INIT_HLIST_NODE(&new_rule->rule_node);
5925
5926         if (parent)
5927                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5928         else
5929                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5930
5931         set_bit(location, hdev->fd_bmap);
5932         hdev->hclge_fd_rule_num++;
5933         hdev->fd_active_type = new_rule->rule_type;
5934
5935         return 0;
5936 }
5937
5938 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
5939                                       struct ethtool_rx_flow_spec *fs,
5940                                       struct hclge_fd_rule *rule, u8 ip_proto)
5941 {
5942         rule->tuples.src_ip[IPV4_INDEX] =
5943                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5944         rule->tuples_mask.src_ip[IPV4_INDEX] =
5945                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5946
5947         rule->tuples.dst_ip[IPV4_INDEX] =
5948                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5949         rule->tuples_mask.dst_ip[IPV4_INDEX] =
5950                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5951
5952         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5953         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5954
5955         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5956         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5957
5958         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5959         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5960
5961         rule->tuples.ether_proto = ETH_P_IP;
5962         rule->tuples_mask.ether_proto = 0xFFFF;
5963
5964         rule->tuples.ip_proto = ip_proto;
5965         rule->tuples_mask.ip_proto = 0xFF;
5966 }
5967
5968 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
5969                                    struct ethtool_rx_flow_spec *fs,
5970                                    struct hclge_fd_rule *rule)
5971 {
5972         rule->tuples.src_ip[IPV4_INDEX] =
5973                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5974         rule->tuples_mask.src_ip[IPV4_INDEX] =
5975                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5976
5977         rule->tuples.dst_ip[IPV4_INDEX] =
5978                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5979         rule->tuples_mask.dst_ip[IPV4_INDEX] =
5980                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5981
5982         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5983         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5984
5985         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5986         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5987
5988         rule->tuples.ether_proto = ETH_P_IP;
5989         rule->tuples_mask.ether_proto = 0xFFFF;
5990 }
5991
5992 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
5993                                       struct ethtool_rx_flow_spec *fs,
5994                                       struct hclge_fd_rule *rule, u8 ip_proto)
5995 {
5996         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
5997                           IPV6_SIZE);
5998         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
5999                           IPV6_SIZE);
6000
6001         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6002                           IPV6_SIZE);
6003         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6004                           IPV6_SIZE);
6005
6006         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6007         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6008
6009         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6010         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6011
6012         rule->tuples.ether_proto = ETH_P_IPV6;
6013         rule->tuples_mask.ether_proto = 0xFFFF;
6014
6015         rule->tuples.ip_proto = ip_proto;
6016         rule->tuples_mask.ip_proto = 0xFF;
6017 }
6018
6019 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6020                                    struct ethtool_rx_flow_spec *fs,
6021                                    struct hclge_fd_rule *rule)
6022 {
6023         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6024                           IPV6_SIZE);
6025         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6026                           IPV6_SIZE);
6027
6028         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6029                           IPV6_SIZE);
6030         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6031                           IPV6_SIZE);
6032
6033         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6034         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6035
6036         rule->tuples.ether_proto = ETH_P_IPV6;
6037         rule->tuples_mask.ether_proto = 0xFFFF;
6038 }
6039
6040 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6041                                      struct ethtool_rx_flow_spec *fs,
6042                                      struct hclge_fd_rule *rule)
6043 {
6044         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6045         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6046
6047         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6048         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6049
6050         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6051         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6052 }
6053
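     /* Fill rule->tuples and rule->tuples_mask from the ethtool flow spec.
      * The flow type is expected to have been validated by
      * hclge_fd_check_spec() before this is called.
      */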
6054 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6055                               struct ethtool_rx_flow_spec *fs,
6056                               struct hclge_fd_rule *rule)
6057 {
6058         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6059
6060         switch (flow_type) {
6061         case SCTP_V4_FLOW:
6062                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6063                 break;
6064         case TCP_V4_FLOW:
6065                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6066                 break;
6067         case UDP_V4_FLOW:
6068                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6069                 break;
6070         case IP_USER_FLOW:
6071                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6072                 break;
6073         case SCTP_V6_FLOW:
6074                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6075                 break;
6076         case TCP_V6_FLOW:
6077                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6078                 break;
6079         case UDP_V6_FLOW:
6080                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6081                 break;
6082         case IPV6_USER_FLOW:
6083                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6084                 break;
6085         case ETHER_FLOW:
6086                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6087                 break;
6088         default:
6089                 return -EOPNOTSUPP;
6090         }
6091
6092         if (fs->flow_type & FLOW_EXT) {
6093                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6094                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6095         }
6096
6097         if (fs->flow_type & FLOW_MAC_EXT) {
6098                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6099                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6100         }
6101
6102         return 0;
6103 }
6104
6105 /* the caller must hold hdev->fd_rule_lock */
6106 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6107                                 struct hclge_fd_rule *rule)
6108 {
6109         int ret;
6110
6111         if (!rule) {
6112                 dev_err(&hdev->pdev->dev,
6113                         "The flow director rule is NULL\n");
6114                 return -EINVAL;
6115         }
6116
6117         /* it will never fail here, so there is no need to check the return value */
6118         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6119
6120         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6121         if (ret)
6122                 goto clear_rule;
6123
6124         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6125         if (ret)
6126                 goto clear_rule;
6127
6128         return 0;
6129
6130 clear_rule:
6131         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6132         return ret;
6133 }
6134
6135 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6136 {
6137         struct hclge_vport *vport = hclge_get_vport(handle);
6138         struct hclge_dev *hdev = vport->back;
6139
6140         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6141 }
6142
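     /* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC requests a drop action,
      * otherwise the cookie carries the VF id in its upper bits and the queue
      * index in its lower bits (see ethtool_get_flow_spec_ring{,_vf}()).
      * Illustrative example (not from this driver): "ethtool -N eth0 ...
      * action -1" maps to a drop, while "... action 5" steers matching packets
      * to queue 5 of the PF.
      */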
6143 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6144                                       u16 *vport_id, u8 *action, u16 *queue_id)
6145 {
6146         struct hclge_vport *vport = hdev->vport;
6147
6148         if (ring_cookie == RX_CLS_FLOW_DISC) {
6149                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6150         } else {
6151                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6152                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6153                 u16 tqps;
6154
6155                 if (vf > hdev->num_req_vfs) {
6156                         dev_err(&hdev->pdev->dev,
6157                                 "Error: vf id (%u) > max vf num (%u)\n",
6158                                 vf, hdev->num_req_vfs);
6159                         return -EINVAL;
6160                 }
6161
6162                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6163                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6164
6165                 if (ring >= tqps) {
6166                         dev_err(&hdev->pdev->dev,
6167                                 "Error: queue id (%u) > max queue id (%u)\n",
6168                                 ring, tqps - 1);
6169                         return -EINVAL;
6170                 }
6171
6172                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6173                 *queue_id = ring;
6174         }
6175
6176         return 0;
6177 }
6178
6179 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6180                               struct ethtool_rxnfc *cmd)
6181 {
6182         struct hclge_vport *vport = hclge_get_vport(handle);
6183         struct hclge_dev *hdev = vport->back;
6184         u16 dst_vport_id = 0, q_index = 0;
6185         struct ethtool_rx_flow_spec *fs;
6186         struct hclge_fd_rule *rule;
6187         u32 unused = 0;
6188         u8 action;
6189         int ret;
6190
6191         if (!hnae3_dev_fd_supported(hdev)) {
6192                 dev_err(&hdev->pdev->dev,
6193                         "flow director is not supported\n");
6194                 return -EOPNOTSUPP;
6195         }
6196
6197         if (!hdev->fd_en) {
6198                 dev_err(&hdev->pdev->dev,
6199                         "please enable flow director first\n");
6200                 return -EOPNOTSUPP;
6201         }
6202
6203         if (hclge_is_cls_flower_active(handle)) {
6204                 dev_err(&hdev->pdev->dev,
6205                         "please delete all existing cls flower rules first\n");
6206                 return -EINVAL;
6207         }
6208
6209         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6210
6211         ret = hclge_fd_check_spec(hdev, fs, &unused);
6212         if (ret)
6213                 return ret;
6214
6215         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6216                                          &action, &q_index);
6217         if (ret)
6218                 return ret;
6219
6220         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6221         if (!rule)
6222                 return -ENOMEM;
6223
6224         ret = hclge_fd_get_tuple(hdev, fs, rule);
6225         if (ret) {
6226                 kfree(rule);
6227                 return ret;
6228         }
6229
6230         rule->flow_type = fs->flow_type;
6231         rule->location = fs->location;
6232         rule->unused_tuple = unused;
6233         rule->vf_id = dst_vport_id;
6234         rule->queue_id = q_index;
6235         rule->action = action;
6236         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6237
6238         /* to avoid rule conflict, when the user configures rules by ethtool,
6239          * we need to clear all arfs rules first
6240          */
6241         spin_lock_bh(&hdev->fd_rule_lock);
6242         hclge_clear_arfs_rules(handle);
6243
6244         ret = hclge_fd_config_rule(hdev, rule);
6245
6246         spin_unlock_bh(&hdev->fd_rule_lock);
6247
6248         return ret;
6249 }
6250
6251 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6252                               struct ethtool_rxnfc *cmd)
6253 {
6254         struct hclge_vport *vport = hclge_get_vport(handle);
6255         struct hclge_dev *hdev = vport->back;
6256         struct ethtool_rx_flow_spec *fs;
6257         int ret;
6258
6259         if (!hnae3_dev_fd_supported(hdev))
6260                 return -EOPNOTSUPP;
6261
6262         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6263
6264         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6265                 return -EINVAL;
6266
6267         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6268             !hclge_fd_rule_exist(hdev, fs->location)) {
6269                 dev_err(&hdev->pdev->dev,
6270                         "Failed to delete, rule %u does not exist\n", fs->location);
6271                 return -ENOENT;
6272         }
6273
6274         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6275                                    NULL, false);
6276         if (ret)
6277                 return ret;
6278
6279         spin_lock_bh(&hdev->fd_rule_lock);
6280         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6281
6282         spin_unlock_bh(&hdev->fd_rule_lock);
6283
6284         return ret;
6285 }
6286
6287 /* the caller must hold hdev->fd_rule_lock */
6288 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6289                                      bool clear_list)
6290 {
6291         struct hclge_vport *vport = hclge_get_vport(handle);
6292         struct hclge_dev *hdev = vport->back;
6293         struct hclge_fd_rule *rule;
6294         struct hlist_node *node;
6295         u16 location;
6296
6297         if (!hnae3_dev_fd_supported(hdev))
6298                 return;
6299
6300         for_each_set_bit(location, hdev->fd_bmap,
6301                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6302                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6303                                      NULL, false);
6304
6305         if (clear_list) {
6306                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6307                                           rule_node) {
6308                         hlist_del(&rule->rule_node);
6309                         kfree(rule);
6310                 }
6311                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6312                 hdev->hclge_fd_rule_num = 0;
6313                 bitmap_zero(hdev->fd_bmap,
6314                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6315         }
6316 }
6317
6318 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6319 {
6320         struct hclge_vport *vport = hclge_get_vport(handle);
6321         struct hclge_dev *hdev = vport->back;
6322         struct hclge_fd_rule *rule;
6323         struct hlist_node *node;
6324         int ret;
6325
6326         /* Return ok here, because the reset error handling will check this
6327          * return value. If an error is returned here, the reset process
6328          * will fail.
6329          */
6330         if (!hnae3_dev_fd_supported(hdev))
6331                 return 0;
6332
6333         /* if fd is disabled, it should not be restored during reset */
6334         if (!hdev->fd_en)
6335                 return 0;
6336
6337         spin_lock_bh(&hdev->fd_rule_lock);
6338         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6339                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6340                 if (!ret)
6341                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6342
6343                 if (ret) {
6344                         dev_warn(&hdev->pdev->dev,
6345                                  "Restore rule %u failed, remove it\n",
6346                                  rule->location);
6347                         clear_bit(rule->location, hdev->fd_bmap);
6348                         hlist_del(&rule->rule_node);
6349                         kfree(rule);
6350                         hdev->hclge_fd_rule_num--;
6351                 }
6352         }
6353
6354         if (hdev->hclge_fd_rule_num)
6355                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6356
6357         spin_unlock_bh(&hdev->fd_rule_lock);
6358
6359         return 0;
6360 }
6361
6362 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6363                                  struct ethtool_rxnfc *cmd)
6364 {
6365         struct hclge_vport *vport = hclge_get_vport(handle);
6366         struct hclge_dev *hdev = vport->back;
6367
6368         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6369                 return -EOPNOTSUPP;
6370
6371         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6372         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6373
6374         return 0;
6375 }
6376
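     /* The hclge_fd_get_*_info() helpers below are the reverse of the
      * hclge_fd_get_*_tuple() helpers above: they rebuild an ethtool spec from
      * a stored rule, reporting a zero mask for every tuple that was recorded
      * as unused when the rule was added.
      */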
6377 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6378                                      struct ethtool_tcpip4_spec *spec,
6379                                      struct ethtool_tcpip4_spec *spec_mask)
6380 {
6381         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6382         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6383                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6384
6385         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6386         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6387                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6388
6389         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6390         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6391                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6392
6393         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6394         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6395                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6396
6397         spec->tos = rule->tuples.ip_tos;
6398         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6399                         0 : rule->tuples_mask.ip_tos;
6400 }
6401
6402 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6403                                   struct ethtool_usrip4_spec *spec,
6404                                   struct ethtool_usrip4_spec *spec_mask)
6405 {
6406         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6407         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6408                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6409
6410         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6411         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6412                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6413
6414         spec->tos = rule->tuples.ip_tos;
6415         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6416                         0 : rule->tuples_mask.ip_tos;
6417
6418         spec->proto = rule->tuples.ip_proto;
6419         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6420                         0 : rule->tuples_mask.ip_proto;
6421
6422         spec->ip_ver = ETH_RX_NFC_IP4;
6423 }
6424
6425 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6426                                      struct ethtool_tcpip6_spec *spec,
6427                                      struct ethtool_tcpip6_spec *spec_mask)
6428 {
6429         cpu_to_be32_array(spec->ip6src,
6430                           rule->tuples.src_ip, IPV6_SIZE);
6431         cpu_to_be32_array(spec->ip6dst,
6432                           rule->tuples.dst_ip, IPV6_SIZE);
6433         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6434                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6435         else
6436                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6437                                   IPV6_SIZE);
6438
6439         if (rule->unused_tuple & BIT(INNER_DST_IP))
6440                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6441         else
6442                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6443                                   IPV6_SIZE);
6444
6445         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6446         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6447                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6448
6449         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6450         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6451                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6452 }
6453
6454 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6455                                   struct ethtool_usrip6_spec *spec,
6456                                   struct ethtool_usrip6_spec *spec_mask)
6457 {
6458         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6459         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6460         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6461                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6462         else
6463                 cpu_to_be32_array(spec_mask->ip6src,
6464                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6465
6466         if (rule->unused_tuple & BIT(INNER_DST_IP))
6467                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6468         else
6469                 cpu_to_be32_array(spec_mask->ip6dst,
6470                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6471
6472         spec->l4_proto = rule->tuples.ip_proto;
6473         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6474                         0 : rule->tuples_mask.ip_proto;
6475 }
6476
6477 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6478                                     struct ethhdr *spec,
6479                                     struct ethhdr *spec_mask)
6480 {
6481         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6482         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6483
6484         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6485                 eth_zero_addr(spec_mask->h_source);
6486         else
6487                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6488
6489         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6490                 eth_zero_addr(spec_mask->h_dest);
6491         else
6492                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6493
6494         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6495         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6496                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6497 }
6498
6499 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6500                                   struct hclge_fd_rule *rule)
6501 {
6502         if (fs->flow_type & FLOW_EXT) {
6503                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6504                 fs->m_ext.vlan_tci =
6505                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6506                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6507         }
6508
6509         if (fs->flow_type & FLOW_MAC_EXT) {
6510                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6511                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6512                         eth_zero_addr(fs->m_ext.h_dest);
6513                 else
6514                         ether_addr_copy(fs->m_ext.h_dest,
6515                                         rule->tuples_mask.dst_mac);
6516         }
6517 }
6518
6519 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6520                                   struct ethtool_rxnfc *cmd)
6521 {
6522         struct hclge_vport *vport = hclge_get_vport(handle);
6523         struct hclge_fd_rule *rule = NULL;
6524         struct hclge_dev *hdev = vport->back;
6525         struct ethtool_rx_flow_spec *fs;
6526         struct hlist_node *node2;
6527
6528         if (!hnae3_dev_fd_supported(hdev))
6529                 return -EOPNOTSUPP;
6530
6531         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6532
6533         spin_lock_bh(&hdev->fd_rule_lock);
6534
6535         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6536                 if (rule->location >= fs->location)
6537                         break;
6538         }
6539
6540         if (!rule || fs->location != rule->location) {
6541                 spin_unlock_bh(&hdev->fd_rule_lock);
6542
6543                 return -ENOENT;
6544         }
6545
6546         fs->flow_type = rule->flow_type;
6547         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6548         case SCTP_V4_FLOW:
6549         case TCP_V4_FLOW:
6550         case UDP_V4_FLOW:
6551                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6552                                          &fs->m_u.tcp_ip4_spec);
6553                 break;
6554         case IP_USER_FLOW:
6555                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6556                                       &fs->m_u.usr_ip4_spec);
6557                 break;
6558         case SCTP_V6_FLOW:
6559         case TCP_V6_FLOW:
6560         case UDP_V6_FLOW:
6561                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6562                                          &fs->m_u.tcp_ip6_spec);
6563                 break;
6564         case IPV6_USER_FLOW:
6565                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6566                                       &fs->m_u.usr_ip6_spec);
6567                 break;
6568         /* The flow type of the fd rule has been checked before adding it to
6569          * the rule list. As all other flow types have been handled above, it
6570          * must be ETHER_FLOW for the default case.
6571          */
6572         default:
6573                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6574                                         &fs->m_u.ether_spec);
6575                 break;
6576         }
6577
6578         hclge_fd_get_ext_info(fs, rule);
6579
6580         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6581                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6582         } else {
6583                 u64 vf_id;
6584
6585                 fs->ring_cookie = rule->queue_id;
6586                 vf_id = rule->vf_id;
6587                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6588                 fs->ring_cookie |= vf_id;
6589         }
6590
6591         spin_unlock_bh(&hdev->fd_rule_lock);
6592
6593         return 0;
6594 }
6595
6596 static int hclge_get_all_rules(struct hnae3_handle *handle,
6597                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6598 {
6599         struct hclge_vport *vport = hclge_get_vport(handle);
6600         struct hclge_dev *hdev = vport->back;
6601         struct hclge_fd_rule *rule;
6602         struct hlist_node *node2;
6603         int cnt = 0;
6604
6605         if (!hnae3_dev_fd_supported(hdev))
6606                 return -EOPNOTSUPP;
6607
6608         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6609
6610         spin_lock_bh(&hdev->fd_rule_lock);
6611         hlist_for_each_entry_safe(rule, node2,
6612                                   &hdev->fd_rule_list, rule_node) {
6613                 if (cnt == cmd->rule_cnt) {
6614                         spin_unlock_bh(&hdev->fd_rule_lock);
6615                         return -EMSGSIZE;
6616                 }
6617
6618                 rule_locs[cnt] = rule->location;
6619                 cnt++;
6620         }
6621
6622         spin_unlock_bh(&hdev->fd_rule_lock);
6623
6624         cmd->rule_cnt = cnt;
6625
6626         return 0;
6627 }
6628
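     /* Extract the aRFS match tuples from the dissected flow keys. An IPv4
      * address is stored in the last word of the IPv6-sized array, matching
      * the IPV4_INDEX layout used by the ethtool path above.
      */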
6629 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6630                                      struct hclge_fd_rule_tuples *tuples)
6631 {
6632 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6633 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6634
6635         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6636         tuples->ip_proto = fkeys->basic.ip_proto;
6637         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6638
6639         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6640                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6641                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6642         } else {
6643                 int i;
6644
6645                 for (i = 0; i < IPV6_SIZE; i++) {
6646                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6647                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6648                 }
6649         }
6650 }
6651
6652 /* traverse all rules, check whether an existing rule has the same tuples */
6653 static struct hclge_fd_rule *
6654 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6655                           const struct hclge_fd_rule_tuples *tuples)
6656 {
6657         struct hclge_fd_rule *rule = NULL;
6658         struct hlist_node *node;
6659
6660         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6661                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6662                         return rule;
6663         }
6664
6665         return NULL;
6666 }
6667
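     /* Build an aRFS rule from the extracted tuples: every tuple that was
      * filled in is matched exactly (mask of all ones), while MAC, VLAN, TOS
      * and source port are flagged as unused so the key conversion skips them.
      */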
6668 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6669                                      struct hclge_fd_rule *rule)
6670 {
6671         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6672                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6673                              BIT(INNER_SRC_PORT);
6674         rule->action = 0;
6675         rule->vf_id = 0;
6676         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6677         if (tuples->ether_proto == ETH_P_IP) {
6678                 if (tuples->ip_proto == IPPROTO_TCP)
6679                         rule->flow_type = TCP_V4_FLOW;
6680                 else
6681                         rule->flow_type = UDP_V4_FLOW;
6682         } else {
6683                 if (tuples->ip_proto == IPPROTO_TCP)
6684                         rule->flow_type = TCP_V6_FLOW;
6685                 else
6686                         rule->flow_type = UDP_V6_FLOW;
6687         }
6688         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6689         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6690 }
6691
6692 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6693                                       u16 flow_id, struct flow_keys *fkeys)
6694 {
6695         struct hclge_vport *vport = hclge_get_vport(handle);
6696         struct hclge_fd_rule_tuples new_tuples = {};
6697         struct hclge_dev *hdev = vport->back;
6698         struct hclge_fd_rule *rule;
6699         u16 tmp_queue_id;
6700         u16 bit_id;
6701         int ret;
6702
6703         if (!hnae3_dev_fd_supported(hdev))
6704                 return -EOPNOTSUPP;
6705
6706         /* when there are already fd rules added by the user,
6707          * arfs should not work
6708          */
6709         spin_lock_bh(&hdev->fd_rule_lock);
6710         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6711             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6712                 spin_unlock_bh(&hdev->fd_rule_lock);
6713                 return -EOPNOTSUPP;
6714         }
6715
6716         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6717
6718         /* check whether a flow director filter already exists for this flow:
6719          * if not, create a new filter for it;
6720          * if a filter exists with a different queue id, modify the filter;
6721          * if a filter exists with the same queue id, do nothing
6722          */
6723         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6724         if (!rule) {
6725                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6726                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6727                         spin_unlock_bh(&hdev->fd_rule_lock);
6728                         return -ENOSPC;
6729                 }
6730
6731                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6732                 if (!rule) {
6733                         spin_unlock_bh(&hdev->fd_rule_lock);
6734                         return -ENOMEM;
6735                 }
6736
6737                 set_bit(bit_id, hdev->fd_bmap);
6738                 rule->location = bit_id;
6739                 rule->arfs.flow_id = flow_id;
6740                 rule->queue_id = queue_id;
6741                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6742                 ret = hclge_fd_config_rule(hdev, rule);
6743
6744                 spin_unlock_bh(&hdev->fd_rule_lock);
6745
6746                 if (ret)
6747                         return ret;
6748
6749                 return rule->location;
6750         }
6751
6752         spin_unlock_bh(&hdev->fd_rule_lock);
6753
6754         if (rule->queue_id == queue_id)
6755                 return rule->location;
6756
6757         tmp_queue_id = rule->queue_id;
6758         rule->queue_id = queue_id;
6759         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6760         if (ret) {
6761                 rule->queue_id = tmp_queue_id;
6762                 return ret;
6763         }
6764
6765         return rule->location;
6766 }
6767
6768 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6769 {
6770 #ifdef CONFIG_RFS_ACCEL
6771         struct hnae3_handle *handle = &hdev->vport[0].nic;
6772         struct hclge_fd_rule *rule;
6773         struct hlist_node *node;
6774         HLIST_HEAD(del_list);
6775
6776         spin_lock_bh(&hdev->fd_rule_lock);
6777         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6778                 spin_unlock_bh(&hdev->fd_rule_lock);
6779                 return;
6780         }
6781         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6782                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6783                                         rule->arfs.flow_id, rule->location)) {
6784                         hlist_del_init(&rule->rule_node);
6785                         hlist_add_head(&rule->rule_node, &del_list);
6786                         hdev->hclge_fd_rule_num--;
6787                         clear_bit(rule->location, hdev->fd_bmap);
6788                 }
6789         }
6790         spin_unlock_bh(&hdev->fd_rule_lock);
6791
6792         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6793                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6794                                      rule->location, NULL, false);
6795                 kfree(rule);
6796         }
6797 #endif
6798 }
6799
6800 /* the caller must hold hdev->fd_rule_lock */
6801 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6802 {
6803 #ifdef CONFIG_RFS_ACCEL
6804         struct hclge_vport *vport = hclge_get_vport(handle);
6805         struct hclge_dev *hdev = vport->back;
6806
6807         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6808                 hclge_del_all_fd_entries(handle, true);
6809 #endif
6810 }
6811
6812 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6813                                     struct hclge_fd_rule *rule)
6814 {
6815         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6816                 struct flow_match_basic match;
6817                 u16 ethtype_key, ethtype_mask;
6818
6819                 flow_rule_match_basic(flow, &match);
6820                 ethtype_key = ntohs(match.key->n_proto);
6821                 ethtype_mask = ntohs(match.mask->n_proto);
6822
6823                 if (ethtype_key == ETH_P_ALL) {
6824                         ethtype_key = 0;
6825                         ethtype_mask = 0;
6826                 }
6827                 rule->tuples.ether_proto = ethtype_key;
6828                 rule->tuples_mask.ether_proto = ethtype_mask;
6829                 rule->tuples.ip_proto = match.key->ip_proto;
6830                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6831         } else {
6832                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6833                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6834         }
6835 }
6836
6837 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6838                                   struct hclge_fd_rule *rule)
6839 {
6840         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6841                 struct flow_match_eth_addrs match;
6842
6843                 flow_rule_match_eth_addrs(flow, &match);
6844                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6845                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6846                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6847                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6848         } else {
6849                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6850                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6851         }
6852 }
6853
6854 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6855                                    struct hclge_fd_rule *rule)
6856 {
6857         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6858                 struct flow_match_vlan match;
6859
6860                 flow_rule_match_vlan(flow, &match);
6861                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6862                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6863                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6864                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6865         } else {
6866                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6867         }
6868 }
6869
6870 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6871                                  struct hclge_fd_rule *rule)
6872 {
6873         u16 addr_type = 0;
6874
6875         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6876                 struct flow_match_control match;
6877
6878                 flow_rule_match_control(flow, &match);
6879                 addr_type = match.key->addr_type;
6880         }
6881
6882         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6883                 struct flow_match_ipv4_addrs match;
6884
6885                 flow_rule_match_ipv4_addrs(flow, &match);
6886                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6887                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6888                                                 be32_to_cpu(match.mask->src);
6889                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6890                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6891                                                 be32_to_cpu(match.mask->dst);
6892         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6893                 struct flow_match_ipv6_addrs match;
6894
6895                 flow_rule_match_ipv6_addrs(flow, &match);
6896                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6897                                   IPV6_SIZE);
6898                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6899                                   match.mask->src.s6_addr32, IPV6_SIZE);
6900                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6901                                   IPV6_SIZE);
6902                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6903                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6904         } else {
6905                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6906                 rule->unused_tuple |= BIT(INNER_DST_IP);
6907         }
6908 }
6909
6910 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6911                                    struct hclge_fd_rule *rule)
6912 {
6913         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6914                 struct flow_match_ports match;
6915
6916                 flow_rule_match_ports(flow, &match);
6917
6918                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6919                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6920                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6921                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6922         } else {
6923                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6924                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6925         }
6926 }
6927
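/* Translate a tc flower classifier offload request into an hclge_fd_rule.
 * Dissector keys other than control/basic/eth_addrs/vlan/ipv4/ipv6/ports are
 * rejected up front; each hclge_get_cls_key_*() helper above then fills in
 * the corresponding tuple or marks it as unused.
 */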
6928 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6929                                   struct flow_cls_offload *cls_flower,
6930                                   struct hclge_fd_rule *rule)
6931 {
6932         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6933         struct flow_dissector *dissector = flow->match.dissector;
6934
6935         if (dissector->used_keys &
6936             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6937               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6938               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6939               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6940               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6941               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6942               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6943                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6944                         dissector->used_keys);
6945                 return -EOPNOTSUPP;
6946         }
6947
6948         hclge_get_cls_key_basic(flow, rule);
6949         hclge_get_cls_key_mac(flow, rule);
6950         hclge_get_cls_key_vlan(flow, rule);
6951         hclge_get_cls_key_ip(flow, rule);
6952         hclge_get_cls_key_port(flow, rule);
6953
6954         return 0;
6955 }
6956
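/* Validate a flower request before programming it: the target tc must be
 * within tc_max, and the flower prio is used directly as the FD stage-1
 * rule location (prio - 1), so it must lie in [1, rule_num] and must not
 * already be taken in fd_bmap.
 */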
6957 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6958                                   struct flow_cls_offload *cls_flower, int tc)
6959 {
6960         u32 prio = cls_flower->common.prio;
6961
6962         if (tc < 0 || tc > hdev->tc_max) {
6963                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6964                 return -EINVAL;
6965         }
6966
6967         if (prio == 0 ||
6968             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6969                 dev_err(&hdev->pdev->dev,
6970                         "prio %u should be in range[1, %u]\n",
6971                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6972                 return -EINVAL;
6973         }
6974
6975         if (test_bit(prio - 1, hdev->fd_bmap)) {
6976                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6977                 return -EINVAL;
6978         }
6979         return 0;
6980 }
6981
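/* Add a tc flower rule. ethtool-based flow director rules share the same
 * stage-1 table, so they must be removed before flower rules are accepted.
 *
 * Illustrative only (assumed userspace usage, not part of this file): a
 * request reaching this path could be created with something like
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress prio 1 flower \
 *           dst_ip 192.168.0.1 skip_sw hw_tc 2
 *
 * where the exact option set depends on the tc and kernel versions in use.
 */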
6982 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6983                                 struct flow_cls_offload *cls_flower,
6984                                 int tc)
6985 {
6986         struct hclge_vport *vport = hclge_get_vport(handle);
6987         struct hclge_dev *hdev = vport->back;
6988         struct hclge_fd_rule *rule;
6989         int ret;
6990
6991         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6992                 dev_err(&hdev->pdev->dev,
6993                         "please remove all exist fd rules via ethtool first\n");
6994                 return -EINVAL;
6995         }
6996
6997         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6998         if (ret) {
6999                 dev_err(&hdev->pdev->dev,
7000                         "failed to check cls flower params, ret = %d\n", ret);
7001                 return ret;
7002         }
7003
7004         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7005         if (!rule)
7006                 return -ENOMEM;
7007
7008         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7009         if (ret)
7010                 goto err;
7011
7012         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7013         rule->cls_flower.tc = tc;
7014         rule->location = cls_flower->common.prio - 1;
7015         rule->vf_id = 0;
7016         rule->cls_flower.cookie = cls_flower->cookie;
7017         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7018
7019         spin_lock_bh(&hdev->fd_rule_lock);
7020         hclge_clear_arfs_rules(handle);
7021
7022         ret = hclge_fd_config_rule(hdev, rule);
7023
7024         spin_unlock_bh(&hdev->fd_rule_lock);
7025
7026         if (ret) {
7027                 dev_err(&hdev->pdev->dev,
7028                         "failed to add cls flower rule, ret = %d\n", ret);
7029                 goto err;
7030         }
7031
7032         return 0;
7033 err:
7034         kfree(rule);
7035         return ret;
7036 }
7037
7038 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7039                                                    unsigned long cookie)
7040 {
7041         struct hclge_fd_rule *rule;
7042         struct hlist_node *node;
7043
7044         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7045                 if (rule->cls_flower.cookie == cookie)
7046                         return rule;
7047         }
7048
7049         return NULL;
7050 }
7051
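/* Remove a flower rule: look it up by cookie under fd_rule_lock, disable its
 * TCAM entry in FD stage 1, then drop it from the rule list.
 */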
7052 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7053                                 struct flow_cls_offload *cls_flower)
7054 {
7055         struct hclge_vport *vport = hclge_get_vport(handle);
7056         struct hclge_dev *hdev = vport->back;
7057         struct hclge_fd_rule *rule;
7058         int ret;
7059
7060         spin_lock_bh(&hdev->fd_rule_lock);
7061
7062         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7063         if (!rule) {
7064                 spin_unlock_bh(&hdev->fd_rule_lock);
7065                 return -EINVAL;
7066         }
7067
7068         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7069                                    NULL, false);
7070         if (ret) {
7071                 dev_err(&hdev->pdev->dev,
7072                         "failed to delete cls flower rule %u, ret = %d\n",
7073                         rule->location, ret);
7074                 spin_unlock_bh(&hdev->fd_rule_lock);
7075                 return ret;
7076         }
7077
7078         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
7079         if (ret) {
7080                 dev_err(&hdev->pdev->dev,
7081                         "failed to delete cls flower rule %u in list, ret = %d\n",
7082                         rule->location, ret);
7083                 spin_unlock_bh(&hdev->fd_rule_lock);
7084                 return ret;
7085         }
7086
7087         spin_unlock_bh(&hdev->fd_rule_lock);
7088
7089         return 0;
7090 }
7091
7092 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7093 {
7094         struct hclge_vport *vport = hclge_get_vport(handle);
7095         struct hclge_dev *hdev = vport->back;
7096
7097         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7098                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7099 }
7100
7101 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7102 {
7103         struct hclge_vport *vport = hclge_get_vport(handle);
7104         struct hclge_dev *hdev = vport->back;
7105
7106         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7107 }
7108
7109 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7110 {
7111         struct hclge_vport *vport = hclge_get_vport(handle);
7112         struct hclge_dev *hdev = vport->back;
7113
7114         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7115 }
7116
7117 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7118 {
7119         struct hclge_vport *vport = hclge_get_vport(handle);
7120         struct hclge_dev *hdev = vport->back;
7121
7122         return hdev->rst_stats.hw_reset_done_cnt;
7123 }
7124
7125 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7126 {
7127         struct hclge_vport *vport = hclge_get_vport(handle);
7128         struct hclge_dev *hdev = vport->back;
7129         bool clear;
7130
7131         hdev->fd_en = enable;
7132         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7133
7134         if (!enable) {
7135                 spin_lock_bh(&hdev->fd_rule_lock);
7136                 hclge_del_all_fd_entries(handle, clear);
7137                 spin_unlock_bh(&hdev->fd_rule_lock);
7138         } else {
7139                 hclge_restore_fd_entries(handle);
7140         }
7141 }
7142
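/* Enable or disable the MAC. When enabling, TX/RX are turned on together
 * with padding, FCS insertion/stripping and oversize/undersize frame
 * handling; when disabling, all of these bits are simply left cleared.
 */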
7143 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7144 {
7145         struct hclge_desc desc;
7146         struct hclge_config_mac_mode_cmd *req =
7147                 (struct hclge_config_mac_mode_cmd *)desc.data;
7148         u32 loop_en = 0;
7149         int ret;
7150
7151         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7152
7153         if (enable) {
7154                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7155                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7156                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7157                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7158                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7159                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7160                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7161                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7162                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7163                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7164         }
7165
7166         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7167
7168         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7169         if (ret)
7170                 dev_err(&hdev->pdev->dev,
7171                         "mac enable fail, ret =%d.\n", ret);
7172 }
7173
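/* Read-modify-write the per-function MAC/VLAN switch parameter. Only the
 * bits covered by param_mask are updated; hclge_set_loopback() below uses
 * this to toggle the "allow loopback" (SSU) behaviour for the PF.
 */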
7174 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7175                                      u8 switch_param, u8 param_mask)
7176 {
7177         struct hclge_mac_vlan_switch_cmd *req;
7178         struct hclge_desc desc;
7179         u32 func_id;
7180         int ret;
7181
7182         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7183         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7184
7185         /* read current config parameter */
7186         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7187                                    true);
7188         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7189         req->func_id = cpu_to_le32(func_id);
7190
7191         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7192         if (ret) {
7193                 dev_err(&hdev->pdev->dev,
7194                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7195                 return ret;
7196         }
7197
7198         /* modify and write new config parameter */
7199         hclge_cmd_reuse_desc(&desc, false);
7200         req->switch_param = (req->switch_param & param_mask) | switch_param;
7201         req->param_mask = param_mask;
7202
7203         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7204         if (ret)
7205                 dev_err(&hdev->pdev->dev,
7206                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7207         return ret;
7208 }
7209
7210 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7211                                        int link_ret)
7212 {
7213 #define HCLGE_PHY_LINK_STATUS_NUM  200
7214
7215         struct phy_device *phydev = hdev->hw.mac.phydev;
7216         int i = 0;
7217         int ret;
7218
7219         do {
7220                 ret = phy_read_status(phydev);
7221                 if (ret) {
7222                         dev_err(&hdev->pdev->dev,
7223                                 "phy update link status fail, ret = %d\n", ret);
7224                         return;
7225                 }
7226
7227                 if (phydev->link == link_ret)
7228                         break;
7229
7230                 msleep(HCLGE_LINK_STATUS_MS);
7231         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7232 }
7233
7234 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7235 {
7236 #define HCLGE_MAC_LINK_STATUS_NUM  100
7237
7238         int link_status;
7239         int i = 0;
7240         int ret;
7241
7242         do {
7243                 ret = hclge_get_mac_link_status(hdev, &link_status);
7244                 if (ret)
7245                         return ret;
7246                 if (link_status == link_ret)
7247                         return 0;
7248
7249                 msleep(HCLGE_LINK_STATUS_MS);
7250         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7251         return -EBUSY;
7252 }
7253
7254 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7255                                           bool is_phy)
7256 {
7257         int link_ret;
7258
7259         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7260
7261         if (is_phy)
7262                 hclge_phy_link_status_wait(hdev, link_ret);
7263
7264         return hclge_mac_link_status_wait(hdev, link_ret);
7265 }
7266
7267 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7268 {
7269         struct hclge_config_mac_mode_cmd *req;
7270         struct hclge_desc desc;
7271         u32 loop_en;
7272         int ret;
7273
7274         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7275         /* 1 Read out the MAC mode config at first */
7276         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7277         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7278         if (ret) {
7279                 dev_err(&hdev->pdev->dev,
7280                         "mac loopback get fail, ret =%d.\n", ret);
7281                 return ret;
7282         }
7283
7284         /* 2 Then set up the loopback flag */
7285         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7286         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7287
7288         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7289
7290         /* 3 Config mac work mode with the loopback flag
7291          * and its original configuration parameters
7292          */
7293         hclge_cmd_reuse_desc(&desc, false);
7294         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7295         if (ret)
7296                 dev_err(&hdev->pdev->dev,
7297                         "mac loopback set fail, ret =%d.\n", ret);
7298         return ret;
7299 }
7300
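/* Configure the "common" (firmware managed) loopback: serdes serial, serdes
 * parallel, or PHY internal loopback. After the request is sent, the result
 * is polled for up to HCLGE_COMMON_LB_RETRY_NUM * HCLGE_COMMON_LB_RETRY_MS
 * milliseconds until the firmware reports done/success.
 */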
7301 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7302                                      enum hnae3_loop loop_mode)
7303 {
7304 #define HCLGE_COMMON_LB_RETRY_MS        10
7305 #define HCLGE_COMMON_LB_RETRY_NUM       100
7306
7307         struct hclge_common_lb_cmd *req;
7308         struct hclge_desc desc;
7309         int ret, i = 0;
7310         u8 loop_mode_b;
7311
7312         req = (struct hclge_common_lb_cmd *)desc.data;
7313         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7314
7315         switch (loop_mode) {
7316         case HNAE3_LOOP_SERIAL_SERDES:
7317                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7318                 break;
7319         case HNAE3_LOOP_PARALLEL_SERDES:
7320                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7321                 break;
7322         case HNAE3_LOOP_PHY:
7323                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7324                 break;
7325         default:
7326                 dev_err(&hdev->pdev->dev,
7327                         "unsupported common loopback mode %d\n", loop_mode);
7328                 return -ENOTSUPP;
7329         }
7330
7331         if (en) {
7332                 req->enable = loop_mode_b;
7333                 req->mask = loop_mode_b;
7334         } else {
7335                 req->mask = loop_mode_b;
7336         }
7337
7338         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7339         if (ret) {
7340                 dev_err(&hdev->pdev->dev,
7341                         "common loopback set fail, ret = %d\n", ret);
7342                 return ret;
7343         }
7344
7345         do {
7346                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7347                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7348                                            true);
7349                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7350                 if (ret) {
7351                         dev_err(&hdev->pdev->dev,
7352                                 "common loopback get, ret = %d\n", ret);
7353                         return ret;
7354                 }
7355         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7356                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7357
7358         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7359                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7360                 return -EBUSY;
7361         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7362                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7363                 return -EIO;
7364         }
7365         return ret;
7366 }
7367
7368 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7369                                      enum hnae3_loop loop_mode)
7370 {
7371         int ret;
7372
7373         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7374         if (ret)
7375                 return ret;
7376
7377         hclge_cfg_mac_mode(hdev, en);
7378
7379         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7380         if (ret)
7381                 dev_err(&hdev->pdev->dev,
7382                         "serdes loopback config mac mode timeout\n");
7383
7384         return ret;
7385 }
7386
7387 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7388                                      struct phy_device *phydev)
7389 {
7390         int ret;
7391
7392         if (!phydev->suspended) {
7393                 ret = phy_suspend(phydev);
7394                 if (ret)
7395                         return ret;
7396         }
7397
7398         ret = phy_resume(phydev);
7399         if (ret)
7400                 return ret;
7401
7402         return phy_loopback(phydev, true);
7403 }
7404
7405 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7406                                       struct phy_device *phydev)
7407 {
7408         int ret;
7409
7410         ret = phy_loopback(phydev, false);
7411         if (ret)
7412                 return ret;
7413
7414         return phy_suspend(phydev);
7415 }
7416
7417 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7418 {
7419         struct phy_device *phydev = hdev->hw.mac.phydev;
7420         int ret;
7421
7422         if (!phydev) {
7423                 if (hnae3_dev_phy_imp_supported(hdev))
7424                         return hclge_set_common_loopback(hdev, en,
7425                                                          HNAE3_LOOP_PHY);
7426                 return -ENOTSUPP;
7427         }
7428
7429         if (en)
7430                 ret = hclge_enable_phy_loopback(hdev, phydev);
7431         else
7432                 ret = hclge_disable_phy_loopback(hdev, phydev);
7433         if (ret) {
7434                 dev_err(&hdev->pdev->dev,
7435                         "set phy loopback fail, ret = %d\n", ret);
7436                 return ret;
7437         }
7438
7439         hclge_cfg_mac_mode(hdev, en);
7440
7441         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7442         if (ret)
7443                 dev_err(&hdev->pdev->dev,
7444                         "phy loopback config mac mode timeout\n");
7445
7446         return ret;
7447 }
7448
7449 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7450                             int stream_id, bool enable)
7451 {
7452         struct hclge_desc desc;
7453         struct hclge_cfg_com_tqp_queue_cmd *req =
7454                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7455         int ret;
7456
7457         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7458         req->tqp_id = cpu_to_le16(tqp_id);
7459         req->stream_id = cpu_to_le16(stream_id);
7460         if (enable)
7461                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7462
7463         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7464         if (ret)
7465                 dev_err(&hdev->pdev->dev,
7466                         "Tqp enable fail, status =%d.\n", ret);
7467         return ret;
7468 }
7469
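/* Top-level loopback control, typically exercised via the driver's ethtool
 * self-test path. Besides selecting APP/serdes/PHY loopback, it turns SSU
 * loopback off while a test loopback is active (see the comment in the
 * function body) and enables or disables the vport's TQPs accordingly.
 */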
7470 static int hclge_set_loopback(struct hnae3_handle *handle,
7471                               enum hnae3_loop loop_mode, bool en)
7472 {
7473         struct hclge_vport *vport = hclge_get_vport(handle);
7474         struct hnae3_knic_private_info *kinfo;
7475         struct hclge_dev *hdev = vport->back;
7476         int i, ret;
7477
7478         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7479          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7480          * the same, the packets are looped back in the SSU. If SSU loopback
7481          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7482          */
7483         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7484                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7485
7486                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7487                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7488                 if (ret)
7489                         return ret;
7490         }
7491
7492         switch (loop_mode) {
7493         case HNAE3_LOOP_APP:
7494                 ret = hclge_set_app_loopback(hdev, en);
7495                 break;
7496         case HNAE3_LOOP_SERIAL_SERDES:
7497         case HNAE3_LOOP_PARALLEL_SERDES:
7498                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7499                 break;
7500         case HNAE3_LOOP_PHY:
7501                 ret = hclge_set_phy_loopback(hdev, en);
7502                 break;
7503         default:
7504                 ret = -ENOTSUPP;
7505                 dev_err(&hdev->pdev->dev,
7506                         "loop_mode %d is not supported\n", loop_mode);
7507                 break;
7508         }
7509
7510         if (ret)
7511                 return ret;
7512
7513         kinfo = &vport->nic.kinfo;
7514         for (i = 0; i < kinfo->num_tqps; i++) {
7515                 ret = hclge_tqp_enable(hdev, i, 0, en);
7516                 if (ret)
7517                         return ret;
7518         }
7519
7520         return 0;
7521 }
7522
7523 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7524 {
7525         int ret;
7526
7527         ret = hclge_set_app_loopback(hdev, false);
7528         if (ret)
7529                 return ret;
7530
7531         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7532         if (ret)
7533                 return ret;
7534
7535         return hclge_cfg_common_loopback(hdev, false,
7536                                          HNAE3_LOOP_PARALLEL_SERDES);
7537 }
7538
7539 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7540 {
7541         struct hclge_vport *vport = hclge_get_vport(handle);
7542         struct hnae3_knic_private_info *kinfo;
7543         struct hnae3_queue *queue;
7544         struct hclge_tqp *tqp;
7545         int i;
7546
7547         kinfo = &vport->nic.kinfo;
7548         for (i = 0; i < kinfo->num_tqps; i++) {
7549                 queue = handle->kinfo.tqp[i];
7550                 tqp = container_of(queue, struct hclge_tqp, q);
7551                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7552         }
7553 }
7554
7555 static void hclge_flush_link_update(struct hclge_dev *hdev)
7556 {
7557 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7558
7559         unsigned long last = hdev->serv_processed_cnt;
7560         int i = 0;
7561
7562         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7563                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7564                last == hdev->serv_processed_cnt)
7565                 usleep_range(1, 1);
7566 }
7567
7568 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7569 {
7570         struct hclge_vport *vport = hclge_get_vport(handle);
7571         struct hclge_dev *hdev = vport->back;
7572
7573         if (enable) {
7574                 hclge_task_schedule(hdev, 0);
7575         } else {
7576                 /* Set the DOWN flag here to disable link updating */
7577                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7578
7579                 /* flush memory to make sure DOWN is seen by service task */
7580                 smp_mb__before_atomic();
7581                 hclge_flush_link_update(hdev);
7582         }
7583 }
7584
7585 static int hclge_ae_start(struct hnae3_handle *handle)
7586 {
7587         struct hclge_vport *vport = hclge_get_vport(handle);
7588         struct hclge_dev *hdev = vport->back;
7589
7590         /* mac enable */
7591         hclge_cfg_mac_mode(hdev, true);
7592         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7593         hdev->hw.mac.link = 0;
7594
7595         /* reset tqp stats */
7596         hclge_reset_tqp_stats(handle);
7597
7598         hclge_mac_start_phy(hdev);
7599
7600         return 0;
7601 }
7602
7603 static void hclge_ae_stop(struct hnae3_handle *handle)
7604 {
7605         struct hclge_vport *vport = hclge_get_vport(handle);
7606         struct hclge_dev *hdev = vport->back;
7607         int i;
7608
7609         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7610         spin_lock_bh(&hdev->fd_rule_lock);
7611         hclge_clear_arfs_rules(handle);
7612         spin_unlock_bh(&hdev->fd_rule_lock);
7613
7614         /* If it is not a PF reset, the firmware will disable the MAC,
7615          * so we only need to stop the phy here.
7616          */
7617         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7618             hdev->reset_type != HNAE3_FUNC_RESET) {
7619                 hclge_mac_stop_phy(hdev);
7620                 hclge_update_link_status(hdev);
7621                 return;
7622         }
7623
7624         for (i = 0; i < handle->kinfo.num_tqps; i++)
7625                 hclge_reset_tqp(handle, i);
7626
7627         hclge_config_mac_tnl_int(hdev, false);
7628
7629         /* Mac disable */
7630         hclge_cfg_mac_mode(hdev, false);
7631
7632         hclge_mac_stop_phy(hdev);
7633
7634         /* reset tqp stats */
7635         hclge_reset_tqp_stats(handle);
7636         hclge_update_link_status(hdev);
7637 }
7638
7639 int hclge_vport_start(struct hclge_vport *vport)
7640 {
7641         struct hclge_dev *hdev = vport->back;
7642
7643         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7644         vport->last_active_jiffies = jiffies;
7645
7646         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7647                 if (vport->vport_id) {
7648                         hclge_restore_mac_table_common(vport);
7649                         hclge_restore_vport_vlan_table(vport);
7650                 } else {
7651                         hclge_restore_hw_table(hdev);
7652                 }
7653         }
7654
7655         clear_bit(vport->vport_id, hdev->vport_config_block);
7656
7657         return 0;
7658 }
7659
7660 void hclge_vport_stop(struct hclge_vport *vport)
7661 {
7662         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7663 }
7664
7665 static int hclge_client_start(struct hnae3_handle *handle)
7666 {
7667         struct hclge_vport *vport = hclge_get_vport(handle);
7668
7669         return hclge_vport_start(vport);
7670 }
7671
7672 static void hclge_client_stop(struct hnae3_handle *handle)
7673 {
7674         struct hclge_vport *vport = hclge_get_vport(handle);
7675
7676         hclge_vport_stop(vport);
7677 }
7678
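/* Translate the command-queue status and the per-operation response code of
 * a MAC/VLAN table command into an errno: 0 for success, -ENOSPC when an add
 * overflows the table, -ENOENT when a remove/lookup misses, -EIO for command
 * failures or undefined response codes, and -EINVAL for an unknown opcode.
 */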
7679 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7680                                          u16 cmdq_resp, u8  resp_code,
7681                                          enum hclge_mac_vlan_tbl_opcode op)
7682 {
7683         struct hclge_dev *hdev = vport->back;
7684
7685         if (cmdq_resp) {
7686                 dev_err(&hdev->pdev->dev,
7687                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7688                         cmdq_resp);
7689                 return -EIO;
7690         }
7691
7692         if (op == HCLGE_MAC_VLAN_ADD) {
7693                 if (!resp_code || resp_code == 1)
7694                         return 0;
7695                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7696                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7697                         return -ENOSPC;
7698
7699                 dev_err(&hdev->pdev->dev,
7700                         "add mac addr failed for undefined, code=%u.\n",
7701                         resp_code);
7702                 return -EIO;
7703         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7704                 if (!resp_code) {
7705                         return 0;
7706                 } else if (resp_code == 1) {
7707                         dev_dbg(&hdev->pdev->dev,
7708                                 "remove mac addr failed for miss.\n");
7709                         return -ENOENT;
7710                 }
7711
7712                 dev_err(&hdev->pdev->dev,
7713                         "remove mac addr failed for undefined, code=%u.\n",
7714                         resp_code);
7715                 return -EIO;
7716         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7717                 if (!resp_code) {
7718                         return 0;
7719                 } else if (resp_code == 1) {
7720                         dev_dbg(&hdev->pdev->dev,
7721                                 "lookup mac addr failed for miss.\n");
7722                         return -ENOENT;
7723                 }
7724
7725                 dev_err(&hdev->pdev->dev,
7726                         "lookup mac addr failed for undefined, code=%u.\n",
7727                         resp_code);
7728                 return -EIO;
7729         }
7730
7731         dev_err(&hdev->pdev->dev,
7732                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7733
7734         return -EINVAL;
7735 }
7736
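/* Set or clear one VF's bit in the function bitmap of a multicast MAC entry.
 * The bitmap is spread over the command descriptors: VF IDs 0..191 live in
 * desc[1] (six 32-bit words) and the remaining IDs in desc[2].
 */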
7737 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7738 {
7739 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7740
7741         unsigned int word_num;
7742         unsigned int bit_num;
7743
7744         if (vfid > 255 || vfid < 0)
7745                 return -EIO;
7746
7747         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7748                 word_num = vfid / 32;
7749                 bit_num  = vfid % 32;
7750                 if (clr)
7751                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7752                 else
7753                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7754         } else {
7755                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7756                 bit_num  = vfid % 32;
7757                 if (clr)
7758                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7759                 else
7760                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7761         }
7762
7763         return 0;
7764 }
7765
7766 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7767 {
7768 #define HCLGE_DESC_NUMBER 3
7769 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7770         int i, j;
7771
7772         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7773                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7774                         if (desc[i].data[j])
7775                                 return false;
7776
7777         return true;
7778 }
7779
7780 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7781                                    const u8 *addr, bool is_mc)
7782 {
7783         const unsigned char *mac_addr = addr;
7784         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7785                        (mac_addr[0]) | (mac_addr[1] << 8);
7786         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7787
7788         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7789         if (is_mc) {
7790                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7791                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7792         }
7793
7794         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7795         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7796 }
7797
7798 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7799                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7800 {
7801         struct hclge_dev *hdev = vport->back;
7802         struct hclge_desc desc;
7803         u8 resp_code;
7804         u16 retval;
7805         int ret;
7806
7807         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7808
7809         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7810
7811         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7812         if (ret) {
7813                 dev_err(&hdev->pdev->dev,
7814                         "del mac addr failed for cmd_send, ret =%d.\n",
7815                         ret);
7816                 return ret;
7817         }
7818         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7819         retval = le16_to_cpu(desc.retval);
7820
7821         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7822                                              HCLGE_MAC_VLAN_REMOVE);
7823 }
7824
7825 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7826                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7827                                      struct hclge_desc *desc,
7828                                      bool is_mc)
7829 {
7830         struct hclge_dev *hdev = vport->back;
7831         u8 resp_code;
7832         u16 retval;
7833         int ret;
7834
7835         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7836         if (is_mc) {
7837                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7838                 memcpy(desc[0].data,
7839                        req,
7840                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7841                 hclge_cmd_setup_basic_desc(&desc[1],
7842                                            HCLGE_OPC_MAC_VLAN_ADD,
7843                                            true);
7844                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7845                 hclge_cmd_setup_basic_desc(&desc[2],
7846                                            HCLGE_OPC_MAC_VLAN_ADD,
7847                                            true);
7848                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7849         } else {
7850                 memcpy(desc[0].data,
7851                        req,
7852                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7853                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7854         }
7855         if (ret) {
7856                 dev_err(&hdev->pdev->dev,
7857                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7858                         ret);
7859                 return ret;
7860         }
7861         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7862         retval = le16_to_cpu(desc[0].retval);
7863
7864         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7865                                              HCLGE_MAC_VLAN_LKUP);
7866 }
7867
7868 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7869                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7870                                   struct hclge_desc *mc_desc)
7871 {
7872         struct hclge_dev *hdev = vport->back;
7873         int cfg_status;
7874         u8 resp_code;
7875         u16 retval;
7876         int ret;
7877
7878         if (!mc_desc) {
7879                 struct hclge_desc desc;
7880
7881                 hclge_cmd_setup_basic_desc(&desc,
7882                                            HCLGE_OPC_MAC_VLAN_ADD,
7883                                            false);
7884                 memcpy(desc.data, req,
7885                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7886                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7887                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7888                 retval = le16_to_cpu(desc.retval);
7889
7890                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7891                                                            resp_code,
7892                                                            HCLGE_MAC_VLAN_ADD);
7893         } else {
7894                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7895                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7896                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7897                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7898                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7899                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7900                 memcpy(mc_desc[0].data, req,
7901                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7902                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7903                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7904                 retval = le16_to_cpu(mc_desc[0].retval);
7905
7906                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7907                                                            resp_code,
7908                                                            HCLGE_MAC_VLAN_ADD);
7909         }
7910
7911         if (ret) {
7912                 dev_err(&hdev->pdev->dev,
7913                         "add mac addr failed for cmd_send, ret =%d.\n",
7914                         ret);
7915                 return ret;
7916         }
7917
7918         return cfg_status;
7919 }
7920
7921 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7922                                u16 *allocated_size)
7923 {
7924         struct hclge_umv_spc_alc_cmd *req;
7925         struct hclge_desc desc;
7926         int ret;
7927
7928         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7929         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7930
7931         req->space_size = cpu_to_le32(space_size);
7932
7933         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7934         if (ret) {
7935                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7936                         ret);
7937                 return ret;
7938         }
7939
7940         *allocated_size = le32_to_cpu(desc.data[1]);
7941
7942         return 0;
7943 }
7944
7945 static int hclge_init_umv_space(struct hclge_dev *hdev)
7946 {
7947         u16 allocated_size = 0;
7948         int ret;
7949
7950         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7951         if (ret)
7952                 return ret;
7953
7954         if (allocated_size < hdev->wanted_umv_size)
7955                 dev_warn(&hdev->pdev->dev,
7956                          "failed to alloc umv space, want %u, get %u\n",
7957                          hdev->wanted_umv_size, allocated_size);
7958
7959         hdev->max_umv_size = allocated_size;
7960         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7961         hdev->share_umv_size = hdev->priv_umv_size +
7962                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7963
7964         return 0;
7965 }
7966
7967 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7968 {
7969         struct hclge_vport *vport;
7970         int i;
7971
7972         for (i = 0; i < hdev->num_alloc_vport; i++) {
7973                 vport = &hdev->vport[i];
7974                 vport->used_umv_num = 0;
7975         }
7976
7977         mutex_lock(&hdev->vport_lock);
7978         hdev->share_umv_size = hdev->priv_umv_size +
7979                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7980         mutex_unlock(&hdev->vport_lock);
7981 }
7982
7983 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7984 {
7985         struct hclge_dev *hdev = vport->back;
7986         bool is_full;
7987
7988         if (need_lock)
7989                 mutex_lock(&hdev->vport_lock);
7990
7991         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7992                    hdev->share_umv_size == 0);
7993
7994         if (need_lock)
7995                 mutex_unlock(&hdev->vport_lock);
7996
7997         return is_full;
7998 }
7999
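/* Account one unicast MAC (UMV) entry against the vport. Each vport first
 * consumes its private quota (priv_umv_size); anything beyond that is
 * charged to the shared pool (share_umv_size), and freeing an entry returns
 * the space in the same order.
 */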
8000 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8001 {
8002         struct hclge_dev *hdev = vport->back;
8003
8004         if (is_free) {
8005                 if (vport->used_umv_num > hdev->priv_umv_size)
8006                         hdev->share_umv_size++;
8007
8008                 if (vport->used_umv_num > 0)
8009                         vport->used_umv_num--;
8010         } else {
8011                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8012                     hdev->share_umv_size > 0)
8013                         hdev->share_umv_size--;
8014                 vport->used_umv_num++;
8015         }
8016 }
8017
8018 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8019                                                   const u8 *mac_addr)
8020 {
8021         struct hclge_mac_node *mac_node, *tmp;
8022
8023         list_for_each_entry_safe(mac_node, tmp, list, node)
8024                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8025                         return mac_node;
8026
8027         return NULL;
8028 }
8029
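/* Merge a newly requested state into an existing mac node. The resulting
 * transitions, derived from the cases below (rows = current state,
 * columns = requested state):
 *
 *            TO_ADD      TO_DEL        ACTIVE
 *   TO_ADD   unchanged   node freed    ACTIVE
 *   TO_DEL   ACTIVE      unchanged     unchanged
 *   ACTIVE   unchanged   TO_DEL        n/a (not expected)
 */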
8030 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8031                                   enum HCLGE_MAC_NODE_STATE state)
8032 {
8033         switch (state) {
8034         /* from set_rx_mode or tmp_add_list */
8035         case HCLGE_MAC_TO_ADD:
8036                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8037                         mac_node->state = HCLGE_MAC_ACTIVE;
8038                 break;
8039         /* only from set_rx_mode */
8040         case HCLGE_MAC_TO_DEL:
8041                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8042                         list_del(&mac_node->node);
8043                         kfree(mac_node);
8044                 } else {
8045                         mac_node->state = HCLGE_MAC_TO_DEL;
8046                 }
8047                 break;
8048         /* only from tmp_add_list, so the mac_node->state won't be
8049          * ACTIVE.
8050          */
8051         case HCLGE_MAC_ACTIVE:
8052                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8053                         mac_node->state = HCLGE_MAC_ACTIVE;
8054
8055                 break;
8056         }
8057 }
8058
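/* Queue a MAC address change on the vport's unicast or multicast list
 * instead of touching the hardware directly: the entry is stored with a
 * TO_ADD/TO_DEL state and is later written to (or removed from) the hardware
 * table by the sync helpers below (hclge_sync_vport_mac_list() etc.).
 */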
8059 int hclge_update_mac_list(struct hclge_vport *vport,
8060                           enum HCLGE_MAC_NODE_STATE state,
8061                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8062                           const unsigned char *addr)
8063 {
8064         struct hclge_dev *hdev = vport->back;
8065         struct hclge_mac_node *mac_node;
8066         struct list_head *list;
8067
8068         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8069                 &vport->uc_mac_list : &vport->mc_mac_list;
8070
8071         spin_lock_bh(&vport->mac_list_lock);
8072
8073         /* if the mac addr is already in the mac list, no need to add a new
8074          * one into it; just check the mac addr state, convert it to a new
8075          * state, remove it, or do nothing.
8076          */
8077         mac_node = hclge_find_mac_node(list, addr);
8078         if (mac_node) {
8079                 hclge_update_mac_node(mac_node, state);
8080                 spin_unlock_bh(&vport->mac_list_lock);
8081                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8082                 return 0;
8083         }
8084
8085         /* if this address was never added, there is nothing to delete */
8086         if (state == HCLGE_MAC_TO_DEL) {
8087                 spin_unlock_bh(&vport->mac_list_lock);
8088                 dev_err(&hdev->pdev->dev,
8089                         "failed to delete address %pM from mac list\n",
8090                         addr);
8091                 return -ENOENT;
8092         }
8093
8094         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8095         if (!mac_node) {
8096                 spin_unlock_bh(&vport->mac_list_lock);
8097                 return -ENOMEM;
8098         }
8099
8100         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8101
8102         mac_node->state = state;
8103         ether_addr_copy(mac_node->mac_addr, addr);
8104         list_add_tail(&mac_node->node, list);
8105
8106         spin_unlock_bh(&vport->mac_list_lock);
8107
8108         return 0;
8109 }
8110
8111 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8112                              const unsigned char *addr)
8113 {
8114         struct hclge_vport *vport = hclge_get_vport(handle);
8115
8116         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8117                                      addr);
8118 }
8119
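/* Program one unicast address straight into the hardware MAC_VLAN table.
 * The address is looked up first so duplicates are not added twice, and a
 * new entry is only written if the vport still has UMV space available.
 */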
8120 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8121                              const unsigned char *addr)
8122 {
8123         struct hclge_dev *hdev = vport->back;
8124         struct hclge_mac_vlan_tbl_entry_cmd req;
8125         struct hclge_desc desc;
8126         u16 egress_port = 0;
8127         int ret;
8128
8129         /* mac addr check */
8130         if (is_zero_ether_addr(addr) ||
8131             is_broadcast_ether_addr(addr) ||
8132             is_multicast_ether_addr(addr)) {
8133                 dev_err(&hdev->pdev->dev,
8134                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8135                          addr, is_zero_ether_addr(addr),
8136                          is_broadcast_ether_addr(addr),
8137                          is_multicast_ether_addr(addr));
8138                 return -EINVAL;
8139         }
8140
8141         memset(&req, 0, sizeof(req));
8142
8143         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8144                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8145
8146         req.egress_port = cpu_to_le16(egress_port);
8147
8148         hclge_prepare_mac_addr(&req, addr, false);
8149
8150         /* Look up the mac address in the mac_vlan table, and add
8151          * it if the entry does not exist. Duplicate unicast entries
8152          * are not allowed in the mac vlan table.
8153          */
8154         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8155         if (ret == -ENOENT) {
8156                 mutex_lock(&hdev->vport_lock);
8157                 if (!hclge_is_umv_space_full(vport, false)) {
8158                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8159                         if (!ret)
8160                                 hclge_update_umv_space(vport, false);
8161                         mutex_unlock(&hdev->vport_lock);
8162                         return ret;
8163                 }
8164                 mutex_unlock(&hdev->vport_lock);
8165
8166                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8167                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8168                                 hdev->priv_umv_size);
8169
8170                 return -ENOSPC;
8171         }
8172
8173         /* check if we just hit the duplicate */
8174         if (!ret) {
8175                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8176                          vport->vport_id, addr);
8177                 return 0;
8178         }
8179
8180         dev_err(&hdev->pdev->dev,
8181                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8182                 addr);
8183
8184         return ret;
8185 }
8186
8187 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8188                             const unsigned char *addr)
8189 {
8190         struct hclge_vport *vport = hclge_get_vport(handle);
8191
8192         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8193                                      addr);
8194 }
8195
8196 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8197                             const unsigned char *addr)
8198 {
8199         struct hclge_dev *hdev = vport->back;
8200         struct hclge_mac_vlan_tbl_entry_cmd req;
8201         int ret;
8202
8203         /* mac addr check */
8204         if (is_zero_ether_addr(addr) ||
8205             is_broadcast_ether_addr(addr) ||
8206             is_multicast_ether_addr(addr)) {
8207                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8208                         addr);
8209                 return -EINVAL;
8210         }
8211
8212         memset(&req, 0, sizeof(req));
8213         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8214         hclge_prepare_mac_addr(&req, addr, false);
8215         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8216         if (!ret) {
8217                 mutex_lock(&hdev->vport_lock);
8218                 hclge_update_umv_space(vport, true);
8219                 mutex_unlock(&hdev->vport_lock);
8220         } else if (ret == -ENOENT) {
8221                 ret = 0;
8222         }
8223
8224         return ret;
8225 }
8226
8227 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8228                              const unsigned char *addr)
8229 {
8230         struct hclge_vport *vport = hclge_get_vport(handle);
8231
8232         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8233                                      addr);
8234 }
8235
8236 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8237                              const unsigned char *addr)
8238 {
8239         struct hclge_dev *hdev = vport->back;
8240         struct hclge_mac_vlan_tbl_entry_cmd req;
8241         struct hclge_desc desc[3];
8242         int status;
8243
8244         /* mac addr check */
8245         if (!is_multicast_ether_addr(addr)) {
8246                 dev_err(&hdev->pdev->dev,
8247                         "Add mc mac err! invalid mac:%pM.\n",
8248                          addr);
8249                 return -EINVAL;
8250         }
8251         memset(&req, 0, sizeof(req));
8252         hclge_prepare_mac_addr(&req, addr, true);
8253         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8254         if (status) {
8255                 /* This mac addr does not exist, add a new entry for it */
8256                 memset(desc[0].data, 0, sizeof(desc[0].data));
8257                 memset(desc[1].data, 0, sizeof(desc[0].data));
8258                 memset(desc[2].data, 0, sizeof(desc[0].data));
8259         }
8260         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8261         if (status)
8262                 return status;
8263         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8264
8265         /* if the table has already overflowed, do not print each time */
8266         if (status == -ENOSPC &&
8267             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8268                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8269
8270         return status;
8271 }
8272
8273 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8274                             const unsigned char *addr)
8275 {
8276         struct hclge_vport *vport = hclge_get_vport(handle);
8277
8278         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8279                                      addr);
8280 }
8281
8282 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8283                             const unsigned char *addr)
8284 {
8285         struct hclge_dev *hdev = vport->back;
8286         struct hclge_mac_vlan_tbl_entry_cmd req;
8287         enum hclge_cmd_status status;
8288         struct hclge_desc desc[3];
8289
8290         /* mac addr check */
8291         if (!is_multicast_ether_addr(addr)) {
8292                 dev_dbg(&hdev->pdev->dev,
8293                         "Remove mc mac err! invalid mac:%pM.\n",
8294                          addr);
8295                 return -EINVAL;
8296         }
8297
8298         memset(&req, 0, sizeof(req));
8299         hclge_prepare_mac_addr(&req, addr, true);
8300         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8301         if (!status) {
8302                 /* This mac addr exists, remove this handle's VFID from it */
8303                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8304                 if (status)
8305                         return status;
8306
8307                 if (hclge_is_all_function_id_zero(desc))
8308                         /* All the vfids are zero, so this entry needs to be deleted */
8309                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8310                 else
8311                         /* Not all the vfids are zero, so just update the vfids */
8312                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8313
8314         } else if (status == -ENOENT) {
8315                 status = 0;
8316         }
8317
8318         return status;
8319 }
8320
8321 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8322                                       struct list_head *list,
8323                                       int (*sync)(struct hclge_vport *,
8324                                                   const unsigned char *))
8325 {
8326         struct hclge_mac_node *mac_node, *tmp;
8327         int ret;
8328
8329         list_for_each_entry_safe(mac_node, tmp, list, node) {
8330                 ret = sync(vport, mac_node->mac_addr);
8331                 if (!ret) {
8332                         mac_node->state = HCLGE_MAC_ACTIVE;
8333                 } else {
8334                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8335                                 &vport->state);
8336                         break;
8337                 }
8338         }
8339 }
8340
8341 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8342                                         struct list_head *list,
8343                                         int (*unsync)(struct hclge_vport *,
8344                                                       const unsigned char *))
8345 {
8346         struct hclge_mac_node *mac_node, *tmp;
8347         int ret;
8348
8349         list_for_each_entry_safe(mac_node, tmp, list, node) {
8350                 ret = unsync(vport, mac_node->mac_addr);
8351                 if (!ret || ret == -ENOENT) {
8352                         list_del(&mac_node->node);
8353                         kfree(mac_node);
8354                 } else {
8355                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8356                                 &vport->state);
8357                         break;
8358                 }
8359         }
8360 }
8361
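/* Merge tmp_add_list back into the vport mac list after the hardware
 * update, resolving any TO_DEL requests received in the meantime.
 * Returns true only if every address was programmed successfully.
 */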
8362 static bool hclge_sync_from_add_list(struct list_head *add_list,
8363                                      struct list_head *mac_list)
8364 {
8365         struct hclge_mac_node *mac_node, *tmp, *new_node;
8366         bool all_added = true;
8367
8368         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8369                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8370                         all_added = false;
8371
8372                 /* If the mac address from tmp_add_list is not in the
8373                  * uc/mc_mac_list, a TO_DEL request was received during the
8374                  * time window of adding the mac address into the mac table.
8375                  * If the mac_node state is ACTIVE, change it to TO_DEL so it
8376                  * will be removed next time; otherwise it must be TO_ADD,
8377                  * which means this address has not been added into the mac
8378                  * table yet, so just remove the mac node.
8379                  */
8380                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8381                 if (new_node) {
8382                         hclge_update_mac_node(new_node, mac_node->state);
8383                         list_del(&mac_node->node);
8384                         kfree(mac_node);
8385                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8386                         mac_node->state = HCLGE_MAC_TO_DEL;
8387                         list_del(&mac_node->node);
8388                         list_add_tail(&mac_node->node, mac_list);
8389                 } else {
8390                         list_del(&mac_node->node);
8391                         kfree(mac_node);
8392                 }
8393         }
8394
8395         return all_added;
8396 }
8397
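/* Move the nodes remaining on a temporary delete list back into the
 * vport mac list. An address for which a new TO_ADD request arrived in
 * the meantime keeps its existing entry and is marked ACTIVE; every
 * other node is moved back so it can be handled again later.
 */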
8398 static void hclge_sync_from_del_list(struct list_head *del_list,
8399                                      struct list_head *mac_list)
8400 {
8401         struct hclge_mac_node *mac_node, *tmp, *new_node;
8402
8403         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8404                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8405                 if (new_node) {
8406                         /* If the mac addr exists in the mac list, a new
8407                          * TO_ADD request was received during the time window
8408                          * of configuring the mac address. Since the mac node
8409                          * state is TO_ADD and the address is still in the
8410                          * hardware (because the delete failed), just change
8411                          * the mac node state to ACTIVE.
8412                          */
8413                         new_node->state = HCLGE_MAC_ACTIVE;
8414                         list_del(&mac_node->node);
8415                         kfree(mac_node);
8416                 } else {
8417                         list_del(&mac_node->node);
8418                         list_add_tail(&mac_node->node, mac_list);
8419                 }
8420         }
8421 }
8422
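/* Track whether the unicast/multicast address list overflowed the mac
 * table: clear the corresponding overflow promisc flag when every
 * address was programmed, set it otherwise.
 */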
8423 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8424                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8425                                         bool is_all_added)
8426 {
8427         if (mac_type == HCLGE_MAC_ADDR_UC) {
8428                 if (is_all_added)
8429                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8430                 else
8431                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8432         } else {
8433                 if (is_all_added)
8434                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8435                 else
8436                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8437         }
8438 }
8439
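/* Synchronize one vport's unicast or multicast list with hardware:
 * collect the pending TO_ADD/TO_DEL nodes under the mac list lock,
 * program hardware outside the lock, then merge the results back and
 * update the overflow flags.
 */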
8440 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8441                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8442 {
8443         struct hclge_mac_node *mac_node, *tmp, *new_node;
8444         struct list_head tmp_add_list, tmp_del_list;
8445         struct list_head *list;
8446         bool all_added;
8447
8448         INIT_LIST_HEAD(&tmp_add_list);
8449         INIT_LIST_HEAD(&tmp_del_list);
8450
8451         /* move the mac addrs to tmp_add_list and tmp_del_list, so they
8452          * can be added/deleted outside the spin lock
8453          */
8454         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8455                 &vport->uc_mac_list : &vport->mc_mac_list;
8456
8457         spin_lock_bh(&vport->mac_list_lock);
8458
8459         list_for_each_entry_safe(mac_node, tmp, list, node) {
8460                 switch (mac_node->state) {
8461                 case HCLGE_MAC_TO_DEL:
8462                         list_del(&mac_node->node);
8463                         list_add_tail(&mac_node->node, &tmp_del_list);
8464                         break;
8465                 case HCLGE_MAC_TO_ADD:
8466                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8467                         if (!new_node)
8468                                 goto stop_traverse;
8469                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8470                         new_node->state = mac_node->state;
8471                         list_add_tail(&new_node->node, &tmp_add_list);
8472                         break;
8473                 default:
8474                         break;
8475                 }
8476         }
8477
8478 stop_traverse:
8479         spin_unlock_bh(&vport->mac_list_lock);
8480
8481         /* delete first, in order to get max mac table space for adding */
8482         if (mac_type == HCLGE_MAC_ADDR_UC) {
8483                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8484                                             hclge_rm_uc_addr_common);
8485                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8486                                           hclge_add_uc_addr_common);
8487         } else {
8488                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8489                                             hclge_rm_mc_addr_common);
8490                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8491                                           hclge_add_mc_addr_common);
8492         }
8493
8494         /* if adding/deleting some mac addresses failed, move them back to
8495          * the mac_list and retry next time.
8496          */
8497         spin_lock_bh(&vport->mac_list_lock);
8498
8499         hclge_sync_from_del_list(&tmp_del_list, list);
8500         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8501
8502         spin_unlock_bh(&vport->mac_list_lock);
8503
8504         hclge_update_overflow_flags(vport, mac_type, all_added);
8505 }
8506
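/* A vport needs a mac table sync when its MAC_TBL_CHANGE flag is set
 * (the test also clears the flag) and the vport is not blocked by a
 * pending configuration, e.g. during a VF reset.
 */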
8507 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8508 {
8509         struct hclge_dev *hdev = vport->back;
8510
8511         if (test_bit(vport->vport_id, hdev->vport_config_block))
8512                 return false;
8513
8514         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8515                 return true;
8516
8517         return false;
8518 }
8519
8520 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8521 {
8522         int i;
8523
8524         for (i = 0; i < hdev->num_alloc_vport; i++) {
8525                 struct hclge_vport *vport = &hdev->vport[i];
8526
8527                 if (!hclge_need_sync_mac_table(vport))
8528                         continue;
8529
8530                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8531                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8532         }
8533 }
8534
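/* Move TO_DEL and ACTIVE nodes from @list onto @tmp_del_list so they
 * can be removed from hardware outside the mac list lock. TO_ADD nodes
 * were never programmed: they are freed when @is_del_list is true and
 * kept in place otherwise.
 */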
8535 static void hclge_build_del_list(struct list_head *list,
8536                                  bool is_del_list,
8537                                  struct list_head *tmp_del_list)
8538 {
8539         struct hclge_mac_node *mac_cfg, *tmp;
8540
8541         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8542                 switch (mac_cfg->state) {
8543                 case HCLGE_MAC_TO_DEL:
8544                 case HCLGE_MAC_ACTIVE:
8545                         list_del(&mac_cfg->node);
8546                         list_add_tail(&mac_cfg->node, tmp_del_list);
8547                         break;
8548                 case HCLGE_MAC_TO_ADD:
8549                         if (is_del_list) {
8550                                 list_del(&mac_cfg->node);
8551                                 kfree(mac_cfg);
8552                         }
8553                         break;
8554                 }
8555         }
8556 }
8557
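/* Remove the addresses on @tmp_del_list from hardware. Successfully
 * removed nodes are freed, except that when @is_del_list is false an
 * ACTIVE entry is kept as TO_ADD so it can be restored after the VF
 * reset finishes.
 */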
8558 static void hclge_unsync_del_list(struct hclge_vport *vport,
8559                                   int (*unsync)(struct hclge_vport *vport,
8560                                                 const unsigned char *addr),
8561                                   bool is_del_list,
8562                                   struct list_head *tmp_del_list)
8563 {
8564         struct hclge_mac_node *mac_cfg, *tmp;
8565         int ret;
8566
8567         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8568                 ret = unsync(vport, mac_cfg->mac_addr);
8569                 if (!ret || ret == -ENOENT) {
8570                         /* clear all mac addrs from hardware, but keep them
8571                          * in the mac list so they can be restored after the
8572                          * vf reset finishes.
8573                          */
8574                         if (!is_del_list &&
8575                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8576                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8577                         } else {
8578                                 list_del(&mac_cfg->node);
8579                                 kfree(mac_cfg);
8580                         }
8581                 } else if (is_del_list) {
8582                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8583                 }
8584         }
8585 }
8586
8587 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8588                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8589 {
8590         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8591         struct hclge_dev *hdev = vport->back;
8592         struct list_head tmp_del_list, *list;
8593
8594         if (mac_type == HCLGE_MAC_ADDR_UC) {
8595                 list = &vport->uc_mac_list;
8596                 unsync = hclge_rm_uc_addr_common;
8597         } else {
8598                 list = &vport->mc_mac_list;
8599                 unsync = hclge_rm_mc_addr_common;
8600         }
8601
8602         INIT_LIST_HEAD(&tmp_del_list);
8603
8604         if (!is_del_list)
8605                 set_bit(vport->vport_id, hdev->vport_config_block);
8606
8607         spin_lock_bh(&vport->mac_list_lock);
8608
8609         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8610
8611         spin_unlock_bh(&vport->mac_list_lock);
8612
8613         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8614
8615         spin_lock_bh(&vport->mac_list_lock);
8616
8617         hclge_sync_from_del_list(&tmp_del_list, list);
8618
8619         spin_unlock_bh(&vport->mac_list_lock);
8620 }
8621
8622 /* remove all mac addresses when uninitializing */
8623 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8624                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8625 {
8626         struct hclge_mac_node *mac_node, *tmp;
8627         struct hclge_dev *hdev = vport->back;
8628         struct list_head tmp_del_list, *list;
8629
8630         INIT_LIST_HEAD(&tmp_del_list);
8631
8632         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8633                 &vport->uc_mac_list : &vport->mc_mac_list;
8634
8635         spin_lock_bh(&vport->mac_list_lock);
8636
8637         list_for_each_entry_safe(mac_node, tmp, list, node) {
8638                 switch (mac_node->state) {
8639                 case HCLGE_MAC_TO_DEL:
8640                 case HCLGE_MAC_ACTIVE:
8641                         list_del(&mac_node->node);
8642                         list_add_tail(&mac_node->node, &tmp_del_list);
8643                         break;
8644                 case HCLGE_MAC_TO_ADD:
8645                         list_del(&mac_node->node);
8646                         kfree(mac_node);
8647                         break;
8648                 }
8649         }
8650
8651         spin_unlock_bh(&vport->mac_list_lock);
8652
8653         if (mac_type == HCLGE_MAC_ADDR_UC)
8654                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8655                                             hclge_rm_uc_addr_common);
8656         else
8657                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8658                                             hclge_rm_mc_addr_common);
8659
8660         if (!list_empty(&tmp_del_list))
8661                 dev_warn(&hdev->pdev->dev,
8662                          "failed to completely uninit %s mac list for vport %u\n",
8663                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8664                          vport->vport_id);
8665
8666         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8667                 list_del(&mac_node->node);
8668                 kfree(mac_node);
8669         }
8670 }
8671
8672 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8673 {
8674         struct hclge_vport *vport;
8675         int i;
8676
8677         for (i = 0; i < hdev->num_alloc_vport; i++) {
8678                 vport = &hdev->vport[i];
8679                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8680                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8681         }
8682 }
8683
8684 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8685                                               u16 cmdq_resp, u8 resp_code)
8686 {
8687 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8688 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8689 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8690 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8691
8692         int return_status;
8693
8694         if (cmdq_resp) {
8695                 dev_err(&hdev->pdev->dev,
8696                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8697                         cmdq_resp);
8698                 return -EIO;
8699         }
8700
8701         switch (resp_code) {
8702         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8703         case HCLGE_ETHERTYPE_ALREADY_ADD:
8704                 return_status = 0;
8705                 break;
8706         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8707                 dev_err(&hdev->pdev->dev,
8708                         "add mac ethertype failed for manager table overflow.\n");
8709                 return_status = -EIO;
8710                 break;
8711         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8712                 dev_err(&hdev->pdev->dev,
8713                         "add mac ethertype failed for key conflict.\n");
8714                 return_status = -EIO;
8715                 break;
8716         default:
8717                 dev_err(&hdev->pdev->dev,
8718                         "add mac ethertype failed for undefined, code=%u.\n",
8719                         resp_code);
8720                 return_status = -EIO;
8721         }
8722
8723         return return_status;
8724 }
8725
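/* Return true if @mac_addr is already in use, either present in the
 * mac vlan table or assigned to another VF.
 */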
8726 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8727                                      u8 *mac_addr)
8728 {
8729         struct hclge_mac_vlan_tbl_entry_cmd req;
8730         struct hclge_dev *hdev = vport->back;
8731         struct hclge_desc desc;
8732         u16 egress_port = 0;
8733         int i;
8734
8735         if (is_zero_ether_addr(mac_addr))
8736                 return false;
8737
8738         memset(&req, 0, sizeof(req));
8739         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8740                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8741         req.egress_port = cpu_to_le16(egress_port);
8742         hclge_prepare_mac_addr(&req, mac_addr, false);
8743
8744         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8745                 return true;
8746
8747         vf_idx += HCLGE_VF_VPORT_START_NUM;
8748         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8749                 if (i != vf_idx &&
8750                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8751                         return true;
8752
8753         return false;
8754 }
8755
8756 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8757                             u8 *mac_addr)
8758 {
8759         struct hclge_vport *vport = hclge_get_vport(handle);
8760         struct hclge_dev *hdev = vport->back;
8761
8762         vport = hclge_get_vf_vport(hdev, vf);
8763         if (!vport)
8764                 return -EINVAL;
8765
8766         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8767                 dev_info(&hdev->pdev->dev,
8768                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8769                          mac_addr);
8770                 return 0;
8771         }
8772
8773         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8774                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8775                         mac_addr);
8776                 return -EEXIST;
8777         }
8778
8779         ether_addr_copy(vport->vf_info.mac, mac_addr);
8780
8781         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8782                 dev_info(&hdev->pdev->dev,
8783                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8784                          vf, mac_addr);
8785                 return hclge_inform_reset_assert_to_vf(vport);
8786         }
8787
8788         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8789                  vf, mac_addr);
8790         return 0;
8791 }
8792
8793 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8794                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8795 {
8796         struct hclge_desc desc;
8797         u8 resp_code;
8798         u16 retval;
8799         int ret;
8800
8801         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8802         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8803
8804         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8805         if (ret) {
8806                 dev_err(&hdev->pdev->dev,
8807                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8808                         ret);
8809                 return ret;
8810         }
8811
8812         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8813         retval = le16_to_cpu(desc.retval);
8814
8815         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8816 }
8817
8818 static int init_mgr_tbl(struct hclge_dev *hdev)
8819 {
8820         int ret;
8821         int i;
8822
8823         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8824                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8825                 if (ret) {
8826                         dev_err(&hdev->pdev->dev,
8827                                 "add mac ethertype failed, ret =%d.\n",
8828                                 ret);
8829                         return ret;
8830                 }
8831         }
8832
8833         return 0;
8834 }
8835
8836 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8837 {
8838         struct hclge_vport *vport = hclge_get_vport(handle);
8839         struct hclge_dev *hdev = vport->back;
8840
8841         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8842 }
8843
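/* Update the unicast mac list when the device address changes: queue
 * @new_addr for adding (or revive it if it was pending deletion),
 * queue @old_addr, if any, for deletion, and mark the vport for a mac
 * table sync.
 */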
8844 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8845                                        const u8 *old_addr, const u8 *new_addr)
8846 {
8847         struct list_head *list = &vport->uc_mac_list;
8848         struct hclge_mac_node *old_node, *new_node;
8849
8850         new_node = hclge_find_mac_node(list, new_addr);
8851         if (!new_node) {
8852                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8853                 if (!new_node)
8854                         return -ENOMEM;
8855
8856                 new_node->state = HCLGE_MAC_TO_ADD;
8857                 ether_addr_copy(new_node->mac_addr, new_addr);
8858                 list_add(&new_node->node, list);
8859         } else {
8860                 if (new_node->state == HCLGE_MAC_TO_DEL)
8861                         new_node->state = HCLGE_MAC_ACTIVE;
8862
8863                 /* make sure the new addr is at the list head, otherwise the
8864                  * dev addr may not be re-added into the mac table due to the
8865                  * umv space limitation after a global/imp reset, which clears
8866                  * the mac table in hardware.
8867                  */
8868                 list_move(&new_node->node, list);
8869         }
8870
8871         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8872                 old_node = hclge_find_mac_node(list, old_addr);
8873                 if (old_node) {
8874                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8875                                 list_del(&old_node->node);
8876                                 kfree(old_node);
8877                         } else {
8878                                 old_node->state = HCLGE_MAC_TO_DEL;
8879                         }
8880                 }
8881         }
8882
8883         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8884
8885         return 0;
8886 }
8887
8888 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8889                               bool is_first)
8890 {
8891         const unsigned char *new_addr = (const unsigned char *)p;
8892         struct hclge_vport *vport = hclge_get_vport(handle);
8893         struct hclge_dev *hdev = vport->back;
8894         unsigned char *old_addr = NULL;
8895         int ret;
8896
8897         /* mac addr check */
8898         if (is_zero_ether_addr(new_addr) ||
8899             is_broadcast_ether_addr(new_addr) ||
8900             is_multicast_ether_addr(new_addr)) {
8901                 dev_err(&hdev->pdev->dev,
8902                         "change uc mac err! invalid mac: %pM.\n",
8903                          new_addr);
8904                 return -EINVAL;
8905         }
8906
8907         ret = hclge_pause_addr_cfg(hdev, new_addr);
8908         if (ret) {
8909                 dev_err(&hdev->pdev->dev,
8910                         "failed to configure mac pause address, ret = %d\n",
8911                         ret);
8912                 return ret;
8913         }
8914
8915         if (!is_first)
8916                 old_addr = hdev->hw.mac.mac_addr;
8917
8918         spin_lock_bh(&vport->mac_list_lock);
8919         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8920         if (ret) {
8921                 dev_err(&hdev->pdev->dev,
8922                         "failed to change the mac addr:%pM, ret = %d\n",
8923                         new_addr, ret);
8924                 spin_unlock_bh(&vport->mac_list_lock);
8925
8926                 if (!is_first)
8927                         hclge_pause_addr_cfg(hdev, old_addr);
8928
8929                 return ret;
8930         }
8931         /* the dev addr must be updated under the spin lock to prevent it
8932          * from being removed by the set_rx_mode path.
8933          */
8934         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8935         spin_unlock_bh(&vport->mac_list_lock);
8936
8937         hclge_task_schedule(hdev, 0);
8938
8939         return 0;
8940 }
8941
8942 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
8943 {
8944         struct mii_ioctl_data *data = if_mii(ifr);
8945
8946         if (!hnae3_dev_phy_imp_supported(hdev))
8947                 return -EOPNOTSUPP;
8948
8949         switch (cmd) {
8950         case SIOCGMIIPHY:
8951                 data->phy_id = hdev->hw.mac.phy_addr;
8952                 /* this command reads phy id and register at the same time */
8953                 fallthrough;
8954         case SIOCGMIIREG:
8955                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
8956                 return 0;
8957
8958         case SIOCSMIIREG:
8959                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
8960         default:
8961                 return -EOPNOTSUPP;
8962         }
8963 }
8964
8965 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8966                           int cmd)
8967 {
8968         struct hclge_vport *vport = hclge_get_vport(handle);
8969         struct hclge_dev *hdev = vport->back;
8970
8971         if (!hdev->hw.mac.phydev)
8972                 return hclge_mii_ioctl(hdev, ifr, cmd);
8973
8974         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8975 }
8976
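/* Read-modify-write the vlan filter control: read the current config
 * for @vlan_type/@vf_id, then set or clear the @fe_type filter enable
 * bits according to @filter_en.
 */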
8977 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8978                                       u8 fe_type, bool filter_en, u8 vf_id)
8979 {
8980         struct hclge_vlan_filter_ctrl_cmd *req;
8981         struct hclge_desc desc;
8982         int ret;
8983
8984         /* read current vlan filter parameter */
8985         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8986         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8987         req->vlan_type = vlan_type;
8988         req->vf_id = vf_id;
8989
8990         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8991         if (ret) {
8992                 dev_err(&hdev->pdev->dev,
8993                         "failed to get vlan filter config, ret = %d.\n", ret);
8994                 return ret;
8995         }
8996
8997         /* modify and write new config parameter */
8998         hclge_cmd_reuse_desc(&desc, false);
8999         req->vlan_fe = filter_en ?
9000                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9001
9002         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9003         if (ret)
9004                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9005                         ret);
9006
9007         return ret;
9008 }
9009
9010 #define HCLGE_FILTER_TYPE_VF            0
9011 #define HCLGE_FILTER_TYPE_PORT          1
9012 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
9013 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
9014 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
9015 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
9016 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
9017 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
9018                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
9019 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
9020                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
9021
9022 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9023 {
9024         struct hclge_vport *vport = hclge_get_vport(handle);
9025         struct hclge_dev *hdev = vport->back;
9026
9027         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9028                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9029                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
9030                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9031                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
9032         } else {
9033                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9034                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9035                                            0);
9036         }
9037         if (enable)
9038                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9039         else
9040                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9041 }
9042
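/* Build and send the two-descriptor VF vlan filter command: the vlan
 * id and the add/kill flag go in the first descriptor, and the target
 * function is selected by setting one bit in a vf bitmap that spans
 * both descriptors.
 */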
9043 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9044                                         bool is_kill, u16 vlan,
9045                                         struct hclge_desc *desc)
9046 {
9047         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9048         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9049         u8 vf_byte_val;
9050         u8 vf_byte_off;
9051         int ret;
9052
9053         hclge_cmd_setup_basic_desc(&desc[0],
9054                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9055         hclge_cmd_setup_basic_desc(&desc[1],
9056                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9057
9058         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9059
9060         vf_byte_off = vfid / 8;
9061         vf_byte_val = 1 << (vfid % 8);
9062
9063         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9064         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9065
9066         req0->vlan_id  = cpu_to_le16(vlan);
9067         req0->vlan_cfg = is_kill;
9068
9069         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9070                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9071         else
9072                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9073
9074         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9075         if (ret) {
9076                 dev_err(&hdev->pdev->dev,
9077                         "Send vf vlan command fail, ret =%d.\n",
9078                         ret);
9079                 return ret;
9080         }
9081
9082         return 0;
9083 }
9084
9085 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9086                                           bool is_kill, struct hclge_desc *desc)
9087 {
9088         struct hclge_vlan_filter_vf_cfg_cmd *req;
9089
9090         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9091
9092         if (!is_kill) {
9093 #define HCLGE_VF_VLAN_NO_ENTRY  2
9094                 if (!req->resp_code || req->resp_code == 1)
9095                         return 0;
9096
9097                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9098                         set_bit(vfid, hdev->vf_vlan_full);
9099                         dev_warn(&hdev->pdev->dev,
9100                                  "vf vlan table is full, vf vlan filter is disabled\n");
9101                         return 0;
9102                 }
9103
9104                 dev_err(&hdev->pdev->dev,
9105                         "Add vf vlan filter fail, ret =%u.\n",
9106                         req->resp_code);
9107         } else {
9108 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9109                 if (!req->resp_code)
9110                         return 0;
9111
9112                 /* when the vf vlan table is full, the vf vlan filter is
9113                  * disabled and new vlan ids are not added into the table.
9114                  * Just return 0 without a warning to avoid massive verbose
9115                  * logs during unload.
9116                  */
9117                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9118                         return 0;
9119
9120                 dev_err(&hdev->pdev->dev,
9121                         "Kill vf vlan filter fail, ret =%u.\n",
9122                         req->resp_code);
9123         }
9124
9125         return -EIO;
9126 }
9127
9128 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9129                                     bool is_kill, u16 vlan,
9130                                     __be16 proto)
9131 {
9132         struct hclge_vport *vport = &hdev->vport[vfid];
9133         struct hclge_desc desc[2];
9134         int ret;
9135
9136         /* if the vf vlan table is full, firmware disables the vf vlan
9137          * filter, so adding a new vlan id is neither possible nor needed.
9138          * If spoof check is enabled too, the new vlan must not be added,
9139          * because tx packets with that vlan id would be dropped.
9140          */
9141         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9142                 if (vport->vf_info.spoofchk && vlan) {
9143                         dev_err(&hdev->pdev->dev,
9144                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9145                         return -EPERM;
9146                 }
9147                 return 0;
9148         }
9149
9150         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9151         if (ret)
9152                 return ret;
9153
9154         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9155 }
9156
9157 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9158                                       u16 vlan_id, bool is_kill)
9159 {
9160         struct hclge_vlan_filter_pf_cfg_cmd *req;
9161         struct hclge_desc desc;
9162         u8 vlan_offset_byte_val;
9163         u8 vlan_offset_byte;
9164         u8 vlan_offset_160;
9165         int ret;
9166
9167         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9168
9169         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9170         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9171                            HCLGE_VLAN_BYTE_SIZE;
9172         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9173
9174         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9175         req->vlan_offset = vlan_offset_160;
9176         req->vlan_cfg = is_kill;
9177         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9178
9179         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9180         if (ret)
9181                 dev_err(&hdev->pdev->dev,
9182                         "port vlan command, send fail, ret =%d.\n", ret);
9183         return ret;
9184 }
9185
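/* Program @vlan_id for @vport_id: the per-function VF vlan table is
 * always updated, while the port vlan table is only touched when the
 * first vport joins or the last vport leaves this vlan.
 */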
9186 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9187                                     u16 vport_id, u16 vlan_id,
9188                                     bool is_kill)
9189 {
9190         u16 vport_idx, vport_num = 0;
9191         int ret;
9192
9193         if (is_kill && !vlan_id)
9194                 return 0;
9195
9196         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9197                                        proto);
9198         if (ret) {
9199                 dev_err(&hdev->pdev->dev,
9200                         "Set %u vport vlan filter config fail, ret =%d.\n",
9201                         vport_id, ret);
9202                 return ret;
9203         }
9204
9205         /* vlan 0 may be added twice when 8021q module is enabled */
9206         if (!is_kill && !vlan_id &&
9207             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9208                 return 0;
9209
9210         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9211                 dev_err(&hdev->pdev->dev,
9212                         "Add port vlan failed, vport %u is already in vlan %u\n",
9213                         vport_id, vlan_id);
9214                 return -EINVAL;
9215         }
9216
9217         if (is_kill &&
9218             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9219                 dev_err(&hdev->pdev->dev,
9220                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9221                         vport_id, vlan_id);
9222                 return -EINVAL;
9223         }
9224
9225         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9226                 vport_num++;
9227
9228         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9229                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9230                                                  is_kill);
9231
9232         return ret;
9233 }
9234
9235 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9236 {
9237         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9238         struct hclge_vport_vtag_tx_cfg_cmd *req;
9239         struct hclge_dev *hdev = vport->back;
9240         struct hclge_desc desc;
9241         u16 bmap_index;
9242         int status;
9243
9244         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9245
9246         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9247         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9248         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9249         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9250                       vcfg->accept_tag1 ? 1 : 0);
9251         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9252                       vcfg->accept_untag1 ? 1 : 0);
9253         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9254                       vcfg->accept_tag2 ? 1 : 0);
9255         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9256                       vcfg->accept_untag2 ? 1 : 0);
9257         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9258                       vcfg->insert_tag1_en ? 1 : 0);
9259         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9260                       vcfg->insert_tag2_en ? 1 : 0);
9261         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9262                       vcfg->tag_shift_mode_en ? 1 : 0);
9263         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9264
9265         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9266         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9267                         HCLGE_VF_NUM_PER_BYTE;
9268         req->vf_bitmap[bmap_index] =
9269                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9270
9271         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9272         if (status)
9273                 dev_err(&hdev->pdev->dev,
9274                         "Send port txvlan cfg command fail, ret =%d\n",
9275                         status);
9276
9277         return status;
9278 }
9279
9280 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9281 {
9282         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9283         struct hclge_vport_vtag_rx_cfg_cmd *req;
9284         struct hclge_dev *hdev = vport->back;
9285         struct hclge_desc desc;
9286         u16 bmap_index;
9287         int status;
9288
9289         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9290
9291         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9292         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9293                       vcfg->strip_tag1_en ? 1 : 0);
9294         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9295                       vcfg->strip_tag2_en ? 1 : 0);
9296         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9297                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9298         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9299                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9300         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9301                       vcfg->strip_tag1_discard_en ? 1 : 0);
9302         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9303                       vcfg->strip_tag2_discard_en ? 1 : 0);
9304
9305         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9306         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9307                         HCLGE_VF_NUM_PER_BYTE;
9308         req->vf_bitmap[bmap_index] =
9309                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9310
9311         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9312         if (status)
9313                 dev_err(&hdev->pdev->dev,
9314                         "Send port rxvlan cfg command fail, ret =%d\n",
9315                         status);
9316
9317         return status;
9318 }
9319
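/* Set up a vport's TX vlan tag insertion and RX vlan tag stripping
 * based on whether a port based vlan is configured, then write both
 * configurations to hardware.
 */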
9320 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9321                                   u16 port_base_vlan_state,
9322                                   u16 vlan_tag)
9323 {
9324         int ret;
9325
9326         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9327                 vport->txvlan_cfg.accept_tag1 = true;
9328                 vport->txvlan_cfg.insert_tag1_en = false;
9329                 vport->txvlan_cfg.default_tag1 = 0;
9330         } else {
9331                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9332
9333                 vport->txvlan_cfg.accept_tag1 =
9334                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9335                 vport->txvlan_cfg.insert_tag1_en = true;
9336                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9337         }
9338
9339         vport->txvlan_cfg.accept_untag1 = true;
9340
9341         /* accept_tag2 and accept_untag2 are not supported on
9342          * pdev revision(0x20); newer revisions support them, but
9343          * these two fields cannot be configured by the user.
9344          */
9345         vport->txvlan_cfg.accept_tag2 = true;
9346         vport->txvlan_cfg.accept_untag2 = true;
9347         vport->txvlan_cfg.insert_tag2_en = false;
9348         vport->txvlan_cfg.default_tag2 = 0;
9349         vport->txvlan_cfg.tag_shift_mode_en = true;
9350
9351         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9352                 vport->rxvlan_cfg.strip_tag1_en = false;
9353                 vport->rxvlan_cfg.strip_tag2_en =
9354                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9355                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9356         } else {
9357                 vport->rxvlan_cfg.strip_tag1_en =
9358                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9359                 vport->rxvlan_cfg.strip_tag2_en = true;
9360                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9361         }
9362
9363         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9364         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9365         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9366
9367         ret = hclge_set_vlan_tx_offload_cfg(vport);
9368         if (ret)
9369                 return ret;
9370
9371         return hclge_set_vlan_rx_offload_cfg(vport);
9372 }
9373
9374 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9375 {
9376         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9377         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9378         struct hclge_desc desc;
9379         int status;
9380
9381         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9382         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9383         rx_req->ot_fst_vlan_type =
9384                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9385         rx_req->ot_sec_vlan_type =
9386                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9387         rx_req->in_fst_vlan_type =
9388                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9389         rx_req->in_sec_vlan_type =
9390                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9391
9392         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9393         if (status) {
9394                 dev_err(&hdev->pdev->dev,
9395                         "Send rxvlan protocol type command fail, ret =%d\n",
9396                         status);
9397                 return status;
9398         }
9399
9400         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9401
9402         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9403         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9404         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9405
9406         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9407         if (status)
9408                 dev_err(&hdev->pdev->dev,
9409                         "Send txvlan protocol type command fail, ret =%d\n",
9410                         status);
9411
9412         return status;
9413 }
9414
9415 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9416 {
9417 #define HCLGE_DEF_VLAN_TYPE             0x8100
9418
9419         struct hnae3_handle *handle = &hdev->vport[0].nic;
9420         struct hclge_vport *vport;
9421         int ret;
9422         int i;
9423
9424         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9425                 /* for revision 0x21, vf vlan filter is per function */
9426                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9427                         vport = &hdev->vport[i];
9428                         ret = hclge_set_vlan_filter_ctrl(hdev,
9429                                                          HCLGE_FILTER_TYPE_VF,
9430                                                          HCLGE_FILTER_FE_EGRESS,
9431                                                          true,
9432                                                          vport->vport_id);
9433                         if (ret)
9434                                 return ret;
9435                 }
9436
9437                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9438                                                  HCLGE_FILTER_FE_INGRESS, true,
9439                                                  0);
9440                 if (ret)
9441                         return ret;
9442         } else {
9443                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9444                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9445                                                  true, 0);
9446                 if (ret)
9447                         return ret;
9448         }
9449
9450         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9451
9452         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9453         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9454         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9455         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9456         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9457         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9458
9459         ret = hclge_set_vlan_protocol_type(hdev);
9460         if (ret)
9461                 return ret;
9462
9463         for (i = 0; i < hdev->num_alloc_vport; i++) {
9464                 u16 vlan_tag;
9465
9466                 vport = &hdev->vport[i];
9467                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9468
9469                 ret = hclge_vlan_offload_cfg(vport,
9470                                              vport->port_base_vlan_cfg.state,
9471                                              vlan_tag);
9472                 if (ret)
9473                         return ret;
9474         }
9475
9476         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9477 }
9478
9479 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9480                                        bool written_to_tbl)
9481 {
9482         struct hclge_vport_vlan_cfg *vlan;
9483
9484         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9485         if (!vlan)
9486                 return;
9487
9488         vlan->hd_tbl_status = written_to_tbl;
9489         vlan->vlan_id = vlan_id;
9490
9491         list_add_tail(&vlan->node, &vport->vlan_list);
9492 }
9493
9494 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9495 {
9496         struct hclge_vport_vlan_cfg *vlan, *tmp;
9497         struct hclge_dev *hdev = vport->back;
9498         int ret;
9499
9500         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9501                 if (!vlan->hd_tbl_status) {
9502                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9503                                                        vport->vport_id,
9504                                                        vlan->vlan_id, false);
9505                         if (ret) {
9506                                 dev_err(&hdev->pdev->dev,
9507                                         "restore vport vlan list failed, ret=%d\n",
9508                                         ret);
9509                                 return ret;
9510                         }
9511                 }
9512                 vlan->hd_tbl_status = true;
9513         }
9514
9515         return 0;
9516 }
9517
9518 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9519                                       bool is_write_tbl)
9520 {
9521         struct hclge_vport_vlan_cfg *vlan, *tmp;
9522         struct hclge_dev *hdev = vport->back;
9523
9524         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9525                 if (vlan->vlan_id == vlan_id) {
9526                         if (is_write_tbl && vlan->hd_tbl_status)
9527                                 hclge_set_vlan_filter_hw(hdev,
9528                                                          htons(ETH_P_8021Q),
9529                                                          vport->vport_id,
9530                                                          vlan_id,
9531                                                          true);
9532
9533                         list_del(&vlan->node);
9534                         kfree(vlan);
9535                         break;
9536                 }
9537         }
9538 }
9539
9540 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9541 {
9542         struct hclge_vport_vlan_cfg *vlan, *tmp;
9543         struct hclge_dev *hdev = vport->back;
9544
9545         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9546                 if (vlan->hd_tbl_status)
9547                         hclge_set_vlan_filter_hw(hdev,
9548                                                  htons(ETH_P_8021Q),
9549                                                  vport->vport_id,
9550                                                  vlan->vlan_id,
9551                                                  true);
9552
9553                 vlan->hd_tbl_status = false;
9554                 if (is_del_list) {
9555                         list_del(&vlan->node);
9556                         kfree(vlan);
9557                 }
9558         }
9559         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9560 }
9561
9562 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9563 {
9564         struct hclge_vport_vlan_cfg *vlan, *tmp;
9565         struct hclge_vport *vport;
9566         int i;
9567
9568         for (i = 0; i < hdev->num_alloc_vport; i++) {
9569                 vport = &hdev->vport[i];
9570                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9571                         list_del(&vlan->node);
9572                         kfree(vlan);
9573                 }
9574         }
9575 }
9576
9577 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9578 {
9579         struct hclge_vport_vlan_cfg *vlan, *tmp;
9580         struct hclge_dev *hdev = vport->back;
9581         u16 vlan_proto;
9582         u16 vlan_id;
9583         u16 state;
9584         int ret;
9585
9586         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9587         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9588         state = vport->port_base_vlan_cfg.state;
9589
9590         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9591                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9592                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9593                                          vport->vport_id, vlan_id,
9594                                          false);
9595                 return;
9596         }
9597
9598         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9599                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9600                                                vport->vport_id,
9601                                                vlan->vlan_id, false);
9602                 if (ret)
9603                         break;
9604                 vlan->hd_tbl_status = true;
9605         }
9606 }
9607
9608 /* For global reset and imp reset, hardware will clear the mac table,
9609  * so we change the mac address state from ACTIVE to TO_ADD so that they
9610  * can be restored in the service task after the reset completes. Also,
9611  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
9612  * after reset, so just remove these mac nodes from the mac_list.
9613  */
9614 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9615 {
9616         struct hclge_mac_node *mac_node, *tmp;
9617
9618         list_for_each_entry_safe(mac_node, tmp, list, node) {
9619                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9620                         mac_node->state = HCLGE_MAC_TO_ADD;
9621                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9622                         list_del(&mac_node->node);
9623                         kfree(mac_node);
9624                 }
9625         }
9626 }
9627
9628 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9629 {
9630         spin_lock_bh(&vport->mac_list_lock);
9631
9632         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9633         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9634         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9635
9636         spin_unlock_bh(&vport->mac_list_lock);
9637 }
9638
9639 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9640 {
9641         struct hclge_vport *vport = &hdev->vport[0];
9642         struct hnae3_handle *handle = &vport->nic;
9643
9644         hclge_restore_mac_table_common(vport);
9645         hclge_restore_vport_vlan_table(vport);
9646         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9647
9648         hclge_restore_fd_entries(handle);
9649 }
9650
9651 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9652 {
9653         struct hclge_vport *vport = hclge_get_vport(handle);
9654
9655         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9656                 vport->rxvlan_cfg.strip_tag1_en = false;
9657                 vport->rxvlan_cfg.strip_tag2_en = enable;
9658                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9659         } else {
9660                 vport->rxvlan_cfg.strip_tag1_en = enable;
9661                 vport->rxvlan_cfg.strip_tag2_en = true;
9662                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9663         }
9664
9665         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9666         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9667         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9668         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9669
9670         return hclge_set_vlan_rx_offload_cfg(vport);
9671 }
9672
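/* Switch the hardware vlan filter between port based vlan mode and
 * normal mode: when enabling, drop the per-vport vlan list from
 * hardware and program only the new port vlan; when disabling, remove
 * the old port vlan and restore the per-vport vlan list.
 */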
9673 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9674                                             u16 port_base_vlan_state,
9675                                             struct hclge_vlan_info *new_info,
9676                                             struct hclge_vlan_info *old_info)
9677 {
9678         struct hclge_dev *hdev = vport->back;
9679         int ret;
9680
9681         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9682                 hclge_rm_vport_all_vlan_table(vport, false);
9683                 return hclge_set_vlan_filter_hw(hdev,
9684                                                  htons(new_info->vlan_proto),
9685                                                  vport->vport_id,
9686                                                  new_info->vlan_tag,
9687                                                  false);
9688         }
9689
9690         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9691                                        vport->vport_id, old_info->vlan_tag,
9692                                        true);
9693         if (ret)
9694                 return ret;
9695
9696         return hclge_add_vport_all_vlan_table(vport);
9697 }
9698
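/* Switch a vport to a new port based VLAN configuration: reprogram the
 * VLAN offload and filter hardware for the requested state, then record
 * the new state and VLAN info in the vport.
 */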
9699 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9700                                     struct hclge_vlan_info *vlan_info)
9701 {
9702         struct hnae3_handle *nic = &vport->nic;
9703         struct hclge_vlan_info *old_vlan_info;
9704         struct hclge_dev *hdev = vport->back;
9705         int ret;
9706
9707         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9708
9709         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9710         if (ret)
9711                 return ret;
9712
9713         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9714                 /* add new VLAN tag */
9715                 ret = hclge_set_vlan_filter_hw(hdev,
9716                                                htons(vlan_info->vlan_proto),
9717                                                vport->vport_id,
9718                                                vlan_info->vlan_tag,
9719                                                false);
9720                 if (ret)
9721                         return ret;
9722
9723                 /* remove old VLAN tag */
9724                 ret = hclge_set_vlan_filter_hw(hdev,
9725                                                htons(old_vlan_info->vlan_proto),
9726                                                vport->vport_id,
9727                                                old_vlan_info->vlan_tag,
9728                                                true);
9729                 if (ret)
9730                         return ret;
9731
9732                 goto update;
9733         }
9734
9735         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9736                                                old_vlan_info);
9737         if (ret)
9738                 return ret;
9739
9740         /* update state only when disabling/enabling port based VLAN */
9741         vport->port_base_vlan_cfg.state = state;
9742         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9743                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9744         else
9745                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9746
9747 update:
9748         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9749         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9750         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9751
9752         return 0;
9753 }
9754
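/* Map a requested VLAN tag onto the port based VLAN transition it implies
 * (enable, disable, modify or no change), given the vport's current state.
 */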
9755 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9756                                           enum hnae3_port_base_vlan_state state,
9757                                           u16 vlan)
9758 {
9759         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9760                 if (!vlan)
9761                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9762                 else
9763                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9764         } else {
9765                 if (!vlan)
9766                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9767                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9768                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9769                 else
9770                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9771         }
9772 }
9773
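/* Set a port based VLAN for a VF on behalf of the PF: validate the
 * vlan/qos/protocol, update the VF vport's port based VLAN configuration,
 * and push the new state to the VF when it needs to be aware of it.
 */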
9774 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9775                                     u16 vlan, u8 qos, __be16 proto)
9776 {
9777         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9778         struct hclge_vport *vport = hclge_get_vport(handle);
9779         struct hclge_dev *hdev = vport->back;
9780         struct hclge_vlan_info vlan_info;
9781         u16 state;
9782         int ret;
9783
9784         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9785                 return -EOPNOTSUPP;
9786
9787         vport = hclge_get_vf_vport(hdev, vfid);
9788         if (!vport)
9789                 return -EINVAL;
9790
9791         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9792         if (vlan > VLAN_N_VID - 1 || qos > 7)
9793                 return -EINVAL;
9794         if (proto != htons(ETH_P_8021Q))
9795                 return -EPROTONOSUPPORT;
9796
9797         state = hclge_get_port_base_vlan_state(vport,
9798                                                vport->port_base_vlan_cfg.state,
9799                                                vlan);
9800         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9801                 return 0;
9802
9803         vlan_info.vlan_tag = vlan;
9804         vlan_info.qos = qos;
9805         vlan_info.vlan_proto = ntohs(proto);
9806
9807         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9808         if (ret) {
9809                 dev_err(&hdev->pdev->dev,
9810                         "failed to update port base vlan for vf %d, ret = %d\n",
9811                         vfid, ret);
9812                 return ret;
9813         }
9814
9815         /* for DEVICE_VERSION_V3, the vf doesn't need to know about the port
9816          * based VLAN state.
9817          */
9818         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9819             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9820                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9821                                                   vport->vport_id, state,
9822                                                   vlan, qos,
9823                                                   ntohs(proto));
9824
9825         return 0;
9826 }
9827
9828 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9829 {
9830         struct hclge_vlan_info *vlan_info;
9831         struct hclge_vport *vport;
9832         int ret;
9833         int vf;
9834
9835         /* clear the port based vlan for all vfs */
9836         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9837                 vport = &hdev->vport[vf];
9838                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9839
9840                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9841                                                vport->vport_id,
9842                                                vlan_info->vlan_tag, true);
9843                 if (ret)
9844                         dev_err(&hdev->pdev->dev,
9845                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9846                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9847         }
9848 }
9849
9850 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9851                           u16 vlan_id, bool is_kill)
9852 {
9853         struct hclge_vport *vport = hclge_get_vport(handle);
9854         struct hclge_dev *hdev = vport->back;
9855         bool writen_to_tbl = false;
9856         int ret = 0;
9857
9858         /* When the device is resetting or the reset has failed, firmware is
9859          * unable to handle the mailbox. Just record the vlan id, and remove it
9860          * after the reset finishes.
9861          */
9862         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9863              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9864                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9865                 return -EBUSY;
9866         }
9867
9868         /* when port based vlan is enabled, we use the port based vlan as the
9869          * vlan filter entry. In this case, we don't update the vlan filter table
9870          * when the user adds a new vlan or removes an existing vlan, we just
9871          * update the vport vlan list. The vlan ids in the vlan list are written
9872          * to the vlan filter table only after port based vlan is disabled.
9873          */
9874         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9875                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9876                                                vlan_id, is_kill);
9877                 writen_to_tbl = true;
9878         }
9879
9880         if (!ret) {
9881                 if (is_kill)
9882                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9883                 else
9884                         hclge_add_vport_vlan_table(vport, vlan_id,
9885                                                    writen_to_tbl);
9886         } else if (is_kill) {
9887                 /* when removing the hw vlan filter failed, record the vlan id,
9888                  * and try to remove it from hw later, to be consistent
9889                  * with the stack
9890                  */
9891                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9892         }
9893         return ret;
9894 }
9895
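/* Retry the VLAN deletions that previously failed (recorded in each
 * vport's vlan_del_fail_bmap), bounded to HCLGE_MAX_SYNC_COUNT entries
 * per invocation.
 */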
9896 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9897 {
9898 #define HCLGE_MAX_SYNC_COUNT    60
9899
9900         int i, ret, sync_cnt = 0;
9901         u16 vlan_id;
9902
9903         /* start from vport 1 for PF is always alive */
9904         for (i = 0; i < hdev->num_alloc_vport; i++) {
9905                 struct hclge_vport *vport = &hdev->vport[i];
9906
9907                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9908                                          VLAN_N_VID);
9909                 while (vlan_id != VLAN_N_VID) {
9910                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9911                                                        vport->vport_id, vlan_id,
9912                                                        true);
9913                         if (ret && ret != -EINVAL)
9914                                 return;
9915
9916                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9917                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9918
9919                         sync_cnt++;
9920                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9921                                 return;
9922
9923                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9924                                                  VLAN_N_VID);
9925                 }
9926         }
9927 }
9928
9929 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9930 {
9931         struct hclge_config_max_frm_size_cmd *req;
9932         struct hclge_desc desc;
9933
9934         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9935
9936         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9937         req->max_frm_size = cpu_to_le16(new_mps);
9938         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9939
9940         return hclge_cmd_send(&hdev->hw, &desc, 1);
9941 }
9942
9943 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9944 {
9945         struct hclge_vport *vport = hclge_get_vport(handle);
9946
9947         return hclge_set_vport_mtu(vport, new_mtu);
9948 }
9949
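/* Update the max packet size (mps) of a vport. For a VF only the software
 * mps is recorded (it must fit within the PF's mps); for the PF the MAC
 * frame size and packet buffers are reconfigured with the NIC briefly
 * brought down.
 */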
9950 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9951 {
9952         struct hclge_dev *hdev = vport->back;
9953         int i, max_frm_size, ret;
9954
9955         /* HW supports 2 layers of vlan */
9956         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9957         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9958             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9959                 return -EINVAL;
9960
9961         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9962         mutex_lock(&hdev->vport_lock);
9963         /* VF's mps must fit within hdev->mps */
9964         if (vport->vport_id && max_frm_size > hdev->mps) {
9965                 mutex_unlock(&hdev->vport_lock);
9966                 return -EINVAL;
9967         } else if (vport->vport_id) {
9968                 vport->mps = max_frm_size;
9969                 mutex_unlock(&hdev->vport_lock);
9970                 return 0;
9971         }
9972
9973         /* PF's mps must not be less than any VF's mps */
9974         for (i = 1; i < hdev->num_alloc_vport; i++)
9975                 if (max_frm_size < hdev->vport[i].mps) {
9976                         mutex_unlock(&hdev->vport_lock);
9977                         return -EINVAL;
9978                 }
9979
9980         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9981
9982         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9983         if (ret) {
9984                 dev_err(&hdev->pdev->dev,
9985                         "Change mtu fail, ret =%d\n", ret);
9986                 goto out;
9987         }
9988
9989         hdev->mps = max_frm_size;
9990         vport->mps = max_frm_size;
9991
9992         ret = hclge_buffer_alloc(hdev);
9993         if (ret)
9994                 dev_err(&hdev->pdev->dev,
9995                         "Allocate buffer fail, ret =%d\n", ret);
9996
9997 out:
9998         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9999         mutex_unlock(&hdev->vport_lock);
10000         return ret;
10001 }
10002
10003 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
10004                                     bool enable)
10005 {
10006         struct hclge_reset_tqp_queue_cmd *req;
10007         struct hclge_desc desc;
10008         int ret;
10009
10010         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10011
10012         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10013         req->tqp_id = cpu_to_le16(queue_id);
10014         if (enable)
10015                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10016
10017         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10018         if (ret) {
10019                 dev_err(&hdev->pdev->dev,
10020                         "Send tqp reset cmd error, status =%d\n", ret);
10021                 return ret;
10022         }
10023
10024         return 0;
10025 }
10026
10027 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10028 {
10029         struct hclge_reset_tqp_queue_cmd *req;
10030         struct hclge_desc desc;
10031         int ret;
10032
10033         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10034
10035         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10036         req->tqp_id = cpu_to_le16(queue_id);
10037
10038         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10039         if (ret) {
10040                 dev_err(&hdev->pdev->dev,
10041                         "Get reset status error, status =%d\n", ret);
10042                 return ret;
10043         }
10044
10045         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10046 }
10047
10048 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10049 {
10050         struct hnae3_queue *queue;
10051         struct hclge_tqp *tqp;
10052
10053         queue = handle->kinfo.tqp[queue_id];
10054         tqp = container_of(queue, struct hclge_tqp, q);
10055
10056         return tqp->index;
10057 }
10058
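/* Reset a single TQP: disable the queue, assert the queue reset in
 * hardware, poll for the hardware to finish and then deassert the reset.
 */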
10059 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10060 {
10061         struct hclge_vport *vport = hclge_get_vport(handle);
10062         struct hclge_dev *hdev = vport->back;
10063         int reset_try_times = 0;
10064         int reset_status;
10065         u16 queue_gid;
10066         int ret;
10067
10068         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10069
10070         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10071         if (ret) {
10072                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10073                 return ret;
10074         }
10075
10076         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10077         if (ret) {
10078                 dev_err(&hdev->pdev->dev,
10079                         "Send reset tqp cmd fail, ret = %d\n", ret);
10080                 return ret;
10081         }
10082
10083         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10084                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10085                 if (reset_status)
10086                         break;
10087
10088                 /* Wait for tqp hw reset */
10089                 usleep_range(1000, 1200);
10090         }
10091
10092         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10093                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10094                 return -ETIME;
10095         }
10096
10097         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10098         if (ret)
10099                 dev_err(&hdev->pdev->dev,
10100                         "Deassert the soft reset fail, ret = %d\n", ret);
10101
10102         return ret;
10103 }
10104
10105 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10106 {
10107         struct hnae3_handle *handle = &vport->nic;
10108         struct hclge_dev *hdev = vport->back;
10109         int reset_try_times = 0;
10110         int reset_status;
10111         u16 queue_gid;
10112         int ret;
10113
10114         if (queue_id >= handle->kinfo.num_tqps) {
10115                 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10116                          queue_id);
10117                 return;
10118         }
10119
10120         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10121
10122         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10123         if (ret) {
10124                 dev_warn(&hdev->pdev->dev,
10125                          "Send reset tqp cmd fail, ret = %d\n", ret);
10126                 return;
10127         }
10128
10129         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10130                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10131                 if (reset_status)
10132                         break;
10133
10134                 /* Wait for tqp hw reset */
10135                 usleep_range(1000, 1200);
10136         }
10137
10138         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10139                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10140                 return;
10141         }
10142
10143         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10144         if (ret)
10145                 dev_warn(&hdev->pdev->dev,
10146                          "Deassert the soft reset fail, ret = %d\n", ret);
10147 }
10148
10149 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10150 {
10151         struct hclge_vport *vport = hclge_get_vport(handle);
10152         struct hclge_dev *hdev = vport->back;
10153
10154         return hdev->fw_version;
10155 }
10156
10157 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10158 {
10159         struct phy_device *phydev = hdev->hw.mac.phydev;
10160
10161         if (!phydev)
10162                 return;
10163
10164         phy_set_asym_pause(phydev, rx_en, tx_en);
10165 }
10166
10167 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10168 {
10169         int ret;
10170
10171         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10172                 return 0;
10173
10174         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10175         if (ret)
10176                 dev_err(&hdev->pdev->dev,
10177                         "configure pauseparam error, ret = %d.\n", ret);
10178
10179         return ret;
10180 }
10181
10182 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10183 {
10184         struct phy_device *phydev = hdev->hw.mac.phydev;
10185         u16 remote_advertising = 0;
10186         u16 local_advertising;
10187         u32 rx_pause, tx_pause;
10188         u8 flowctl;
10189
10190         if (!phydev->link || !phydev->autoneg)
10191                 return 0;
10192
10193         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10194
10195         if (phydev->pause)
10196                 remote_advertising = LPA_PAUSE_CAP;
10197
10198         if (phydev->asym_pause)
10199                 remote_advertising |= LPA_PAUSE_ASYM;
10200
10201         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10202                                            remote_advertising);
10203         tx_pause = flowctl & FLOW_CTRL_TX;
10204         rx_pause = flowctl & FLOW_CTRL_RX;
10205
10206         if (phydev->duplex == HCLGE_MAC_HALF) {
10207                 tx_pause = 0;
10208                 rx_pause = 0;
10209         }
10210
10211         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10212 }
10213
10214 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10215                                  u32 *rx_en, u32 *tx_en)
10216 {
10217         struct hclge_vport *vport = hclge_get_vport(handle);
10218         struct hclge_dev *hdev = vport->back;
10219         u8 media_type = hdev->hw.mac.media_type;
10220
10221         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10222                     hclge_get_autoneg(handle) : 0;
10223
10224         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10225                 *rx_en = 0;
10226                 *tx_en = 0;
10227                 return;
10228         }
10229
10230         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10231                 *rx_en = 1;
10232                 *tx_en = 0;
10233         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10234                 *tx_en = 1;
10235                 *rx_en = 0;
10236         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10237                 *rx_en = 1;
10238                 *tx_en = 1;
10239         } else {
10240                 *rx_en = 0;
10241                 *tx_en = 0;
10242         }
10243 }
10244
10245 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10246                                          u32 rx_en, u32 tx_en)
10247 {
10248         if (rx_en && tx_en)
10249                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10250         else if (rx_en && !tx_en)
10251                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10252         else if (!rx_en && tx_en)
10253                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10254         else
10255                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10256
10257         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10258 }
10259
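/* Set the link flow control configuration: reject requests that conflict
 * with autoneg or with PFC, record the requested mode, and either apply it
 * to the MAC directly or restart PHY autonegotiation.
 */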
10260 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10261                                 u32 rx_en, u32 tx_en)
10262 {
10263         struct hclge_vport *vport = hclge_get_vport(handle);
10264         struct hclge_dev *hdev = vport->back;
10265         struct phy_device *phydev = hdev->hw.mac.phydev;
10266         u32 fc_autoneg;
10267
10268         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10269                 fc_autoneg = hclge_get_autoneg(handle);
10270                 if (auto_neg != fc_autoneg) {
10271                         dev_info(&hdev->pdev->dev,
10272                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10273                         return -EOPNOTSUPP;
10274                 }
10275         }
10276
10277         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10278                 dev_info(&hdev->pdev->dev,
10279                          "Priority flow control enabled. Cannot set link flow control.\n");
10280                 return -EOPNOTSUPP;
10281         }
10282
10283         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10284
10285         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10286
10287         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10288                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10289
10290         if (phydev)
10291                 return phy_start_aneg(phydev);
10292
10293         return -EOPNOTSUPP;
10294 }
10295
10296 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10297                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10298 {
10299         struct hclge_vport *vport = hclge_get_vport(handle);
10300         struct hclge_dev *hdev = vport->back;
10301
10302         if (speed)
10303                 *speed = hdev->hw.mac.speed;
10304         if (duplex)
10305                 *duplex = hdev->hw.mac.duplex;
10306         if (auto_neg)
10307                 *auto_neg = hdev->hw.mac.autoneg;
10308 }
10309
10310 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10311                                  u8 *module_type)
10312 {
10313         struct hclge_vport *vport = hclge_get_vport(handle);
10314         struct hclge_dev *hdev = vport->back;
10315
10316         /* When the nic is down, the service task is not running and does not
10317          * update the port information every second. Query the port information
10318          * before returning the media type to ensure the media info is correct.
10319          */
10320         hclge_update_port_info(hdev);
10321
10322         if (media_type)
10323                 *media_type = hdev->hw.mac.media_type;
10324
10325         if (module_type)
10326                 *module_type = hdev->hw.mac.module_type;
10327 }
10328
10329 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10330                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10331 {
10332         struct hclge_vport *vport = hclge_get_vport(handle);
10333         struct hclge_dev *hdev = vport->back;
10334         struct phy_device *phydev = hdev->hw.mac.phydev;
10335         int mdix_ctrl, mdix, is_resolved;
10336         unsigned int retval;
10337
10338         if (!phydev) {
10339                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10340                 *tp_mdix = ETH_TP_MDI_INVALID;
10341                 return;
10342         }
10343
10344         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10345
10346         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10347         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10348                                     HCLGE_PHY_MDIX_CTRL_S);
10349
10350         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10351         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10352         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10353
10354         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10355
10356         switch (mdix_ctrl) {
10357         case 0x0:
10358                 *tp_mdix_ctrl = ETH_TP_MDI;
10359                 break;
10360         case 0x1:
10361                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10362                 break;
10363         case 0x3:
10364                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10365                 break;
10366         default:
10367                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10368                 break;
10369         }
10370
10371         if (!is_resolved)
10372                 *tp_mdix = ETH_TP_MDI_INVALID;
10373         else if (mdix)
10374                 *tp_mdix = ETH_TP_MDI_X;
10375         else
10376                 *tp_mdix = ETH_TP_MDI;
10377 }
10378
10379 static void hclge_info_show(struct hclge_dev *hdev)
10380 {
10381         struct device *dev = &hdev->pdev->dev;
10382
10383         dev_info(dev, "PF info begin:\n");
10384
10385         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10386         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10387         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10388         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10389         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10390         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10391         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10392         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10393         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10394         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10395         dev_info(dev, "This is %s PF\n",
10396                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10397         dev_info(dev, "DCB %s\n",
10398                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10399         dev_info(dev, "MQPRIO %s\n",
10400                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10401
10402         dev_info(dev, "PF info end.\n");
10403 }
10404
10405 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10406                                           struct hclge_vport *vport)
10407 {
10408         struct hnae3_client *client = vport->nic.client;
10409         struct hclge_dev *hdev = ae_dev->priv;
10410         int rst_cnt = hdev->rst_stats.reset_cnt;
10411         int ret;
10412
10413         ret = client->ops->init_instance(&vport->nic);
10414         if (ret)
10415                 return ret;
10416
10417         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10418         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10419             rst_cnt != hdev->rst_stats.reset_cnt) {
10420                 ret = -EBUSY;
10421                 goto init_nic_err;
10422         }
10423
10424         /* Enable nic hw error interrupts */
10425         ret = hclge_config_nic_hw_error(hdev, true);
10426         if (ret) {
10427                 dev_err(&ae_dev->pdev->dev,
10428                         "fail(%d) to enable hw error interrupts\n", ret);
10429                 goto init_nic_err;
10430         }
10431
10432         hnae3_set_client_init_flag(client, ae_dev, 1);
10433
10434         if (netif_msg_drv(&hdev->vport->nic))
10435                 hclge_info_show(hdev);
10436
10437         return ret;
10438
10439 init_nic_err:
10440         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10441         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10442                 msleep(HCLGE_WAIT_RESET_DONE);
10443
10444         client->ops->uninit_instance(&vport->nic, 0);
10445
10446         return ret;
10447 }
10448
10449 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10450                                            struct hclge_vport *vport)
10451 {
10452         struct hclge_dev *hdev = ae_dev->priv;
10453         struct hnae3_client *client;
10454         int rst_cnt;
10455         int ret;
10456
10457         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10458             !hdev->nic_client)
10459                 return 0;
10460
10461         client = hdev->roce_client;
10462         ret = hclge_init_roce_base_info(vport);
10463         if (ret)
10464                 return ret;
10465
10466         rst_cnt = hdev->rst_stats.reset_cnt;
10467         ret = client->ops->init_instance(&vport->roce);
10468         if (ret)
10469                 return ret;
10470
10471         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10472         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10473             rst_cnt != hdev->rst_stats.reset_cnt) {
10474                 ret = -EBUSY;
10475                 goto init_roce_err;
10476         }
10477
10478         /* Enable roce ras interrupts */
10479         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10480         if (ret) {
10481                 dev_err(&ae_dev->pdev->dev,
10482                         "fail(%d) to enable roce ras interrupts\n", ret);
10483                 goto init_roce_err;
10484         }
10485
10486         hnae3_set_client_init_flag(client, ae_dev, 1);
10487
10488         return 0;
10489
10490 init_roce_err:
10491         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10492         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10493                 msleep(HCLGE_WAIT_RESET_DONE);
10494
10495         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10496
10497         return ret;
10498 }
10499
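/* Register a NIC or RoCE client instance on every vport. Initializing a
 * NIC client also initializes the RoCE client when RoCE is supported and
 * both clients are present.
 */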
10500 static int hclge_init_client_instance(struct hnae3_client *client,
10501                                       struct hnae3_ae_dev *ae_dev)
10502 {
10503         struct hclge_dev *hdev = ae_dev->priv;
10504         struct hclge_vport *vport;
10505         int i, ret;
10506
10507         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10508                 vport = &hdev->vport[i];
10509
10510                 switch (client->type) {
10511                 case HNAE3_CLIENT_KNIC:
10512                         hdev->nic_client = client;
10513                         vport->nic.client = client;
10514                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10515                         if (ret)
10516                                 goto clear_nic;
10517
10518                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10519                         if (ret)
10520                                 goto clear_roce;
10521
10522                         break;
10523                 case HNAE3_CLIENT_ROCE:
10524                         if (hnae3_dev_roce_supported(hdev)) {
10525                                 hdev->roce_client = client;
10526                                 vport->roce.client = client;
10527                         }
10528
10529                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10530                         if (ret)
10531                                 goto clear_roce;
10532
10533                         break;
10534                 default:
10535                         return -EINVAL;
10536                 }
10537         }
10538
10539         return 0;
10540
10541 clear_nic:
10542         hdev->nic_client = NULL;
10543         vport->nic.client = NULL;
10544         return ret;
10545 clear_roce:
10546         hdev->roce_client = NULL;
10547         vport->roce.client = NULL;
10548         return ret;
10549 }
10550
10551 static void hclge_uninit_client_instance(struct hnae3_client *client,
10552                                          struct hnae3_ae_dev *ae_dev)
10553 {
10554         struct hclge_dev *hdev = ae_dev->priv;
10555         struct hclge_vport *vport;
10556         int i;
10557
10558         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10559                 vport = &hdev->vport[i];
10560                 if (hdev->roce_client) {
10561                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10562                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10563                                 msleep(HCLGE_WAIT_RESET_DONE);
10564
10565                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10566                                                                 0);
10567                         hdev->roce_client = NULL;
10568                         vport->roce.client = NULL;
10569                 }
10570                 if (client->type == HNAE3_CLIENT_ROCE)
10571                         return;
10572                 if (hdev->nic_client && client->ops->uninit_instance) {
10573                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10574                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10575                                 msleep(HCLGE_WAIT_RESET_DONE);
10576
10577                         client->ops->uninit_instance(&vport->nic, 0);
10578                         hdev->nic_client = NULL;
10579                         vport->nic.client = NULL;
10580                 }
10581         }
10582 }
10583
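/* Map the optional device memory BAR with write combining; devices without
 * this BAR are left unmapped.
 */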
10584 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10585 {
10586 #define HCLGE_MEM_BAR           4
10587
10588         struct pci_dev *pdev = hdev->pdev;
10589         struct hclge_hw *hw = &hdev->hw;
10590
10591         /* if the device does not have device memory, return directly */
10592         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10593                 return 0;
10594
10595         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10596                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10597                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10598         if (!hw->mem_base) {
10599                 dev_err(&pdev->dev, "failed to map device memory\n");
10600                 return -EFAULT;
10601         }
10602
10603         return 0;
10604 }
10605
10606 static int hclge_pci_init(struct hclge_dev *hdev)
10607 {
10608         struct pci_dev *pdev = hdev->pdev;
10609         struct hclge_hw *hw;
10610         int ret;
10611
10612         ret = pci_enable_device(pdev);
10613         if (ret) {
10614                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10615                 return ret;
10616         }
10617
10618         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10619         if (ret) {
10620                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10621                 if (ret) {
10622                         dev_err(&pdev->dev,
10623                                 "can't set consistent PCI DMA\n");
10624                         goto err_disable_device;
10625                 }
10626                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10627         }
10628
10629         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10630         if (ret) {
10631                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10632                 goto err_disable_device;
10633         }
10634
10635         pci_set_master(pdev);
10636         hw = &hdev->hw;
10637         hw->io_base = pcim_iomap(pdev, 2, 0);
10638         if (!hw->io_base) {
10639                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10640                 ret = -ENOMEM;
10641                 goto err_clr_master;
10642         }
10643
10644         ret = hclge_dev_mem_map(hdev);
10645         if (ret)
10646                 goto err_unmap_io_base;
10647
10648         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10649
10650         return 0;
10651
10652 err_unmap_io_base:
10653         pcim_iounmap(pdev, hdev->hw.io_base);
10654 err_clr_master:
10655         pci_clear_master(pdev);
10656         pci_release_regions(pdev);
10657 err_disable_device:
10658         pci_disable_device(pdev);
10659
10660         return ret;
10661 }
10662
10663 static void hclge_pci_uninit(struct hclge_dev *hdev)
10664 {
10665         struct pci_dev *pdev = hdev->pdev;
10666
10667         if (hdev->hw.mem_base)
10668                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10669
10670         pcim_iounmap(pdev, hdev->hw.io_base);
10671         pci_free_irq_vectors(pdev);
10672         pci_clear_master(pdev);
10673         pci_release_mem_regions(pdev);
10674         pci_disable_device(pdev);
10675 }
10676
10677 static void hclge_state_init(struct hclge_dev *hdev)
10678 {
10679         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10680         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10681         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10682         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10683         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10684         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10685         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10686 }
10687
10688 static void hclge_state_uninit(struct hclge_dev *hdev)
10689 {
10690         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10691         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10692
10693         if (hdev->reset_timer.function)
10694                 del_timer_sync(&hdev->reset_timer);
10695         if (hdev->service_task.work.func)
10696                 cancel_delayed_work_sync(&hdev->service_task);
10697 }
10698
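/* Prepare the PF for an FLR: take the reset semaphore, enter the reset
 * handling state and quiesce the device, retrying a limited number of
 * times if the preparation fails or another reset is pending.
 */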
10699 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10700 {
10701 #define HCLGE_FLR_RETRY_WAIT_MS 500
10702 #define HCLGE_FLR_RETRY_CNT     5
10703
10704         struct hclge_dev *hdev = ae_dev->priv;
10705         int retry_cnt = 0;
10706         int ret;
10707
10708 retry:
10709         down(&hdev->reset_sem);
10710         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10711         hdev->reset_type = HNAE3_FLR_RESET;
10712         ret = hclge_reset_prepare(hdev);
10713         if (ret || hdev->reset_pending) {
10714                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10715                         ret);
10716                 if (hdev->reset_pending ||
10717                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10718                         dev_err(&hdev->pdev->dev,
10719                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10720                                 hdev->reset_pending, retry_cnt);
10721                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10722                         up(&hdev->reset_sem);
10723                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10724                         goto retry;
10725                 }
10726         }
10727
10728         /* disable misc vector before FLR done */
10729         hclge_enable_vector(&hdev->misc_vector, false);
10730         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10731         hdev->rst_stats.flr_rst_cnt++;
10732 }
10733
10734 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10735 {
10736         struct hclge_dev *hdev = ae_dev->priv;
10737         int ret;
10738
10739         hclge_enable_vector(&hdev->misc_vector, true);
10740
10741         ret = hclge_reset_rebuild(hdev);
10742         if (ret)
10743                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10744
10745         hdev->reset_type = HNAE3_NONE_RESET;
10746         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10747         up(&hdev->reset_sem);
10748 }
10749
10750 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10751 {
10752         u16 i;
10753
10754         for (i = 0; i < hdev->num_alloc_vport; i++) {
10755                 struct hclge_vport *vport = &hdev->vport[i];
10756                 int ret;
10757
10758                  /* Send cmd to clear VF's FUNC_RST_ING */
10759                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10760                 if (ret)
10761                         dev_warn(&hdev->pdev->dev,
10762                                  "clear vf(%u) rst failed %d!\n",
10763                                  vport->vport_id, ret);
10764         }
10765 }
10766
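/* Main PF initialization path: bring up PCI, the command queue, interrupts,
 * TQPs and vports, then the MAC, VLAN, TM, RSS and flow director blocks,
 * and finally start the service task.
 */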
10767 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10768 {
10769         struct pci_dev *pdev = ae_dev->pdev;
10770         struct hclge_dev *hdev;
10771         int ret;
10772
10773         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10774         if (!hdev)
10775                 return -ENOMEM;
10776
10777         hdev->pdev = pdev;
10778         hdev->ae_dev = ae_dev;
10779         hdev->reset_type = HNAE3_NONE_RESET;
10780         hdev->reset_level = HNAE3_FUNC_RESET;
10781         ae_dev->priv = hdev;
10782
10783         /* HW supports 2 layers of vlan */
10784         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10785
10786         mutex_init(&hdev->vport_lock);
10787         spin_lock_init(&hdev->fd_rule_lock);
10788         sema_init(&hdev->reset_sem, 1);
10789
10790         ret = hclge_pci_init(hdev);
10791         if (ret)
10792                 goto out;
10793
10794         /* Firmware command queue initialize */
10795         ret = hclge_cmd_queue_init(hdev);
10796         if (ret)
10797                 goto err_pci_uninit;
10798
10799         /* Firmware command initialize */
10800         ret = hclge_cmd_init(hdev);
10801         if (ret)
10802                 goto err_cmd_uninit;
10803
10804         ret = hclge_get_cap(hdev);
10805         if (ret)
10806                 goto err_cmd_uninit;
10807
10808         ret = hclge_query_dev_specs(hdev);
10809         if (ret) {
10810                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10811                         ret);
10812                 goto err_cmd_uninit;
10813         }
10814
10815         ret = hclge_configure(hdev);
10816         if (ret) {
10817                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10818                 goto err_cmd_uninit;
10819         }
10820
10821         ret = hclge_init_msi(hdev);
10822         if (ret) {
10823                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10824                 goto err_cmd_uninit;
10825         }
10826
10827         ret = hclge_misc_irq_init(hdev);
10828         if (ret)
10829                 goto err_msi_uninit;
10830
10831         ret = hclge_alloc_tqps(hdev);
10832         if (ret) {
10833                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10834                 goto err_msi_irq_uninit;
10835         }
10836
10837         ret = hclge_alloc_vport(hdev);
10838         if (ret)
10839                 goto err_msi_irq_uninit;
10840
10841         ret = hclge_map_tqp(hdev);
10842         if (ret)
10843                 goto err_msi_irq_uninit;
10844
10845         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10846             !hnae3_dev_phy_imp_supported(hdev)) {
10847                 ret = hclge_mac_mdio_config(hdev);
10848                 if (ret)
10849                         goto err_msi_irq_uninit;
10850         }
10851
10852         ret = hclge_init_umv_space(hdev);
10853         if (ret)
10854                 goto err_mdiobus_unreg;
10855
10856         ret = hclge_mac_init(hdev);
10857         if (ret) {
10858                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10859                 goto err_mdiobus_unreg;
10860         }
10861
10862         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10863         if (ret) {
10864                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10865                 goto err_mdiobus_unreg;
10866         }
10867
10868         ret = hclge_config_gro(hdev, true);
10869         if (ret)
10870                 goto err_mdiobus_unreg;
10871
10872         ret = hclge_init_vlan_config(hdev);
10873         if (ret) {
10874                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10875                 goto err_mdiobus_unreg;
10876         }
10877
10878         ret = hclge_tm_schd_init(hdev);
10879         if (ret) {
10880                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10881                 goto err_mdiobus_unreg;
10882         }
10883
10884         ret = hclge_rss_init_cfg(hdev);
10885         if (ret) {
10886                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10887                 goto err_mdiobus_unreg;
10888         }
10889
10890         ret = hclge_rss_init_hw(hdev);
10891         if (ret) {
10892                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10893                 goto err_mdiobus_unreg;
10894         }
10895
10896         ret = init_mgr_tbl(hdev);
10897         if (ret) {
10898                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10899                 goto err_mdiobus_unreg;
10900         }
10901
10902         ret = hclge_init_fd_config(hdev);
10903         if (ret) {
10904                 dev_err(&pdev->dev,
10905                         "fd table init fail, ret=%d\n", ret);
10906                 goto err_mdiobus_unreg;
10907         }
10908
10909         INIT_KFIFO(hdev->mac_tnl_log);
10910
10911         hclge_dcb_ops_set(hdev);
10912
10913         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10914         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10915
10916         /* Set up affinity after the service timer setup because add_timer_on
10917          * is called in the affinity notify callback.
10918          */
10919         hclge_misc_affinity_setup(hdev);
10920
10921         hclge_clear_all_event_cause(hdev);
10922         hclge_clear_resetting_state(hdev);
10923
10924         /* Log and clear the hw errors that have already occurred */
10925         hclge_handle_all_hns_hw_errors(ae_dev);
10926
10927         /* request a delayed reset for the error recovery because an immediate
10928          * global reset on a PF would affect the pending initialization of other PFs
10929          */
10930         if (ae_dev->hw_err_reset_req) {
10931                 enum hnae3_reset_type reset_level;
10932
10933                 reset_level = hclge_get_reset_level(ae_dev,
10934                                                     &ae_dev->hw_err_reset_req);
10935                 hclge_set_def_reset_request(ae_dev, reset_level);
10936                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10937         }
10938
10939         /* Enable MISC vector(vector0) */
10940         hclge_enable_vector(&hdev->misc_vector, true);
10941
10942         hclge_state_init(hdev);
10943         hdev->last_reset_time = jiffies;
10944
10945         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10946                  HCLGE_DRIVER_NAME);
10947
10948         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10949
10950         return 0;
10951
10952 err_mdiobus_unreg:
10953         if (hdev->hw.mac.phydev)
10954                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10955 err_msi_irq_uninit:
10956         hclge_misc_irq_uninit(hdev);
10957 err_msi_uninit:
10958         pci_free_irq_vectors(pdev);
10959 err_cmd_uninit:
10960         hclge_cmd_uninit(hdev);
10961 err_pci_uninit:
10962         pcim_iounmap(pdev, hdev->hw.io_base);
10963         pci_clear_master(pdev);
10964         pci_release_regions(pdev);
10965         pci_disable_device(pdev);
10966 out:
10967         mutex_destroy(&hdev->vport_lock);
10968         return ret;
10969 }
10970
10971 static void hclge_stats_clear(struct hclge_dev *hdev)
10972 {
10973         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10974 }
10975
10976 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10977 {
10978         return hclge_config_switch_param(hdev, vf, enable,
10979                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10980 }
10981
10982 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10983 {
10984         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10985                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10986                                           enable, vf);
10987 }
10988
10989 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10990 {
10991         int ret;
10992
10993         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10994         if (ret) {
10995                 dev_err(&hdev->pdev->dev,
10996                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10997                         vf, enable ? "on" : "off", ret);
10998                 return ret;
10999         }
11000
11001         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11002         if (ret)
11003                 dev_err(&hdev->pdev->dev,
11004                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11005                         vf, enable ? "on" : "off", ret);
11006
11007         return ret;
11008 }
11009
11010 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11011                                  bool enable)
11012 {
11013         struct hclge_vport *vport = hclge_get_vport(handle);
11014         struct hclge_dev *hdev = vport->back;
11015         u32 new_spoofchk = enable ? 1 : 0;
11016         int ret;
11017
11018         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11019                 return -EOPNOTSUPP;
11020
11021         vport = hclge_get_vf_vport(hdev, vf);
11022         if (!vport)
11023                 return -EINVAL;
11024
11025         if (vport->vf_info.spoofchk == new_spoofchk)
11026                 return 0;
11027
11028         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11029                 dev_warn(&hdev->pdev->dev,
11030                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11031                          vf);
11032         else if (enable && hclge_is_umv_space_full(vport, true))
11033                 dev_warn(&hdev->pdev->dev,
11034                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11035                          vf);
11036
11037         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11038         if (ret)
11039                 return ret;
11040
11041         vport->vf_info.spoofchk = new_spoofchk;
11042         return 0;
11043 }
11044
11045 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11046 {
11047         struct hclge_vport *vport = hdev->vport;
11048         int ret;
11049         int i;
11050
11051         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11052                 return 0;
11053
11054         /* resume the vf spoof check state after reset */
11055         for (i = 0; i < hdev->num_alloc_vport; i++) {
11056                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11057                                                vport->vf_info.spoofchk);
11058                 if (ret)
11059                         return ret;
11060
11061                 vport++;
11062         }
11063
11064         return 0;
11065 }
11066
11067 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11068 {
11069         struct hclge_vport *vport = hclge_get_vport(handle);
11070         struct hclge_dev *hdev = vport->back;
11071         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11072         u32 new_trusted = enable ? 1 : 0;
11073         bool en_bc_pmc;
11074         int ret;
11075
11076         vport = hclge_get_vf_vport(hdev, vf);
11077         if (!vport)
11078                 return -EINVAL;
11079
11080         if (vport->vf_info.trusted == new_trusted)
11081                 return 0;
11082
11083         /* Disable promisc mode for VF if it is not trusted any more. */
11084         if (!enable && vport->vf_info.promisc_enable) {
11085                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11086                 ret = hclge_set_vport_promisc_mode(vport, false, false,
11087                                                    en_bc_pmc);
11088                 if (ret)
11089                         return ret;
11090                 vport->vf_info.promisc_enable = 0;
11091                 hclge_inform_vf_promisc_info(vport);
11092         }
11093
11094         vport->vf_info.trusted = new_trusted;
11095
11096         return 0;
11097 }
11098
11099 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11100 {
11101         int ret;
11102         int vf;
11103
11104         /* reset vf rate to default value */
11105         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11106                 struct hclge_vport *vport = &hdev->vport[vf];
11107
11108                 vport->vf_info.max_tx_rate = 0;
11109                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11110                 if (ret)
11111                         dev_err(&hdev->pdev->dev,
11112                                 "vf%d failed to reset to default, ret=%d\n",
11113                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11114         }
11115 }
11116
11117 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11118                                      int min_tx_rate, int max_tx_rate)
11119 {
11120         if (min_tx_rate != 0 ||
11121             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11122                 dev_err(&hdev->pdev->dev,
11123                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11124                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11125                 return -EINVAL;
11126         }
11127
11128         return 0;
11129 }
11130
11131 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11132                              int min_tx_rate, int max_tx_rate, bool force)
11133 {
11134         struct hclge_vport *vport = hclge_get_vport(handle);
11135         struct hclge_dev *hdev = vport->back;
11136         int ret;
11137
11138         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11139         if (ret)
11140                 return ret;
11141
11142         vport = hclge_get_vf_vport(hdev, vf);
11143         if (!vport)
11144                 return -EINVAL;
11145
11146         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11147                 return 0;
11148
11149         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11150         if (ret)
11151                 return ret;
11152
11153         vport->vf_info.max_tx_rate = max_tx_rate;
11154
11155         return 0;
11156 }
11157
11158 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11159 {
11160         struct hnae3_handle *handle = &hdev->vport->nic;
11161         struct hclge_vport *vport;
11162         int ret;
11163         int vf;
11164
11165         /* resume the vf max_tx_rate after reset */
11166         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11167                 vport = hclge_get_vf_vport(hdev, vf);
11168                 if (!vport)
11169                         return -EINVAL;
11170
11171                 /* zero means max rate; after reset the firmware has already set
11172                  * it to max rate, so just continue.
11173                  */
11174                 if (!vport->vf_info.max_tx_rate)
11175                         continue;
11176
11177                 ret = hclge_set_vf_rate(handle, vf, 0,
11178                                         vport->vf_info.max_tx_rate, true);
11179                 if (ret) {
11180                         dev_err(&hdev->pdev->dev,
11181                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11182                                 vf, vport->vf_info.max_tx_rate, ret);
11183                         return ret;
11184                 }
11185         }
11186
11187         return 0;
11188 }
11189
11190 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11191 {
11192         struct hclge_vport *vport = hdev->vport;
11193         int i;
11194
11195         for (i = 0; i < hdev->num_alloc_vport; i++) {
11196                 hclge_vport_stop(vport);
11197                 vport++;
11198         }
11199 }
11200
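/* Re-initialize hardware state after a reset. The sequence below roughly
 * mirrors the probe-time hclge_init_ae_dev() path: command queue, TQP
 * mapping, MAC, TSO/GRO, VLAN, TM, RSS, manager table and flow director,
 * followed by re-enabling error interrupts and restoring per-VF state.
 */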
11201 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11202 {
11203         struct hclge_dev *hdev = ae_dev->priv;
11204         struct pci_dev *pdev = ae_dev->pdev;
11205         int ret;
11206
11207         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11208
11209         hclge_stats_clear(hdev);
11210         /* NOTE: a PF reset does not need to clear or restore the PF and VF
11211          * table entries, so don't clean the tables in memory here.
11212          */
11213         if (hdev->reset_type == HNAE3_IMP_RESET ||
11214             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11215                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11216                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11217                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11218                 hclge_reset_umv_space(hdev);
11219         }
11220
11221         ret = hclge_cmd_init(hdev);
11222         if (ret) {
11223                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11224                 return ret;
11225         }
11226
11227         ret = hclge_map_tqp(hdev);
11228         if (ret) {
11229                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11230                 return ret;
11231         }
11232
11233         ret = hclge_mac_init(hdev);
11234         if (ret) {
11235                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11236                 return ret;
11237         }
11238
11239         ret = hclge_tp_port_init(hdev);
11240         if (ret) {
11241                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11242                         ret);
11243                 return ret;
11244         }
11245
11246         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11247         if (ret) {
11248                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11249                 return ret;
11250         }
11251
11252         ret = hclge_config_gro(hdev, true);
11253         if (ret)
11254                 return ret;
11255
11256         ret = hclge_init_vlan_config(hdev);
11257         if (ret) {
11258                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11259                 return ret;
11260         }
11261
11262         ret = hclge_tm_init_hw(hdev, true);
11263         if (ret) {
11264                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11265                 return ret;
11266         }
11267
11268         ret = hclge_rss_init_hw(hdev);
11269         if (ret) {
11270                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11271                 return ret;
11272         }
11273
11274         ret = init_mgr_tbl(hdev);
11275         if (ret) {
11276                 dev_err(&pdev->dev,
11277                         "failed to reinit manager table, ret = %d\n", ret);
11278                 return ret;
11279         }
11280
11281         ret = hclge_init_fd_config(hdev);
11282         if (ret) {
11283                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11284                 return ret;
11285         }
11286
11287         /* Log and clear the hw errors that have already occurred */
11288         hclge_handle_all_hns_hw_errors(ae_dev);
11289
11290         /* Re-enable the hw error interrupts because
11291          * the interrupts get disabled on global reset.
11292          */
11293         ret = hclge_config_nic_hw_error(hdev, true);
11294         if (ret) {
11295                 dev_err(&pdev->dev,
11296                         "fail(%d) to re-enable NIC hw error interrupts\n",
11297                         ret);
11298                 return ret;
11299         }
11300
11301         if (hdev->roce_client) {
11302                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11303                 if (ret) {
11304                         dev_err(&pdev->dev,
11305                                 "fail(%d) to re-enable roce ras interrupts\n",
11306                                 ret);
11307                         return ret;
11308                 }
11309         }
11310
11311         hclge_reset_vport_state(hdev);
11312         ret = hclge_reset_vport_spoofchk(hdev);
11313         if (ret)
11314                 return ret;
11315
11316         ret = hclge_resume_vf_rate(hdev);
11317         if (ret)
11318                 return ret;
11319
11320         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11321                  HCLGE_DRIVER_NAME);
11322
11323         return 0;
11324 }
11325
11326 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11327 {
11328         struct hclge_dev *hdev = ae_dev->priv;
11329         struct hclge_mac *mac = &hdev->hw.mac;
11330
11331         hclge_reset_vf_rate(hdev);
11332         hclge_clear_vf_vlan(hdev);
11333         hclge_misc_affinity_teardown(hdev);
11334         hclge_state_uninit(hdev);
11335         hclge_uninit_mac_table(hdev);
11336
11337         if (mac->phydev)
11338                 mdiobus_unregister(mac->mdio_bus);
11339
11340         /* Disable MISC vector (vector0) */
11341         hclge_enable_vector(&hdev->misc_vector, false);
11342         synchronize_irq(hdev->misc_vector.vector_irq);
11343
11344         /* Disable all hw interrupts */
11345         hclge_config_mac_tnl_int(hdev, false);
11346         hclge_config_nic_hw_error(hdev, false);
11347         hclge_config_rocee_ras_interrupt(hdev, false);
11348
11349         hclge_cmd_uninit(hdev);
11350         hclge_misc_irq_uninit(hdev);
11351         hclge_pci_uninit(hdev);
11352         mutex_destroy(&hdev->vport_lock);
11353         hclge_uninit_vport_vlan_table(hdev);
11354         ae_dev->priv = NULL;
11355 }
11356
11357 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11358 {
11359         struct hclge_vport *vport = hclge_get_vport(handle);
11360         struct hclge_dev *hdev = vport->back;
11361
11362         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11363 }
11364
11365 static void hclge_get_channels(struct hnae3_handle *handle,
11366                                struct ethtool_channels *ch)
11367 {
11368         ch->max_combined = hclge_get_max_channels(handle);
11369         ch->other_count = 1;
11370         ch->max_other = 1;
11371         ch->combined_count = handle->kinfo.rss_size;
11372 }
11373
11374 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11375                                         u16 *alloc_tqps, u16 *max_rss_size)
11376 {
11377         struct hclge_vport *vport = hclge_get_vport(handle);
11378         struct hclge_dev *hdev = vport->back;
11379
11380         *alloc_tqps = vport->alloc_tqps;
11381         *max_rss_size = hdev->pf_rss_size_max;
11382 }
11383
11384 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11385                               bool rxfh_configured)
11386 {
11387         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11388         struct hclge_vport *vport = hclge_get_vport(handle);
11389         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11390         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11391         struct hclge_dev *hdev = vport->back;
11392         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11393         u16 cur_rss_size = kinfo->rss_size;
11394         u16 cur_tqps = kinfo->num_tqps;
11395         u16 tc_valid[HCLGE_MAX_TC_NUM];
11396         u16 roundup_size;
11397         u32 *rss_indir;
11398         unsigned int i;
11399         int ret;
11400
11401         kinfo->req_rss_size = new_tqps_num;
11402
11403         ret = hclge_tm_vport_map_update(hdev);
11404         if (ret) {
11405                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11406                 return ret;
11407         }
11408
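        /* tc_size is passed as a power-of-two exponent (ilog2 of the
         * rounded-up rss_size); e.g. rss_size = 5 -> roundup_pow_of_two = 8
         * -> tc_size = 3.
         */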
11409         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11410         roundup_size = ilog2(roundup_size);
11411         /* Set the RSS TC mode according to the new RSS size */
11412         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11413                 tc_valid[i] = 0;
11414
11415                 if (!(hdev->hw_tc_map & BIT(i)))
11416                         continue;
11417
11418                 tc_valid[i] = 1;
11419                 tc_size[i] = roundup_size;
11420                 tc_offset[i] = kinfo->rss_size * i;
11421         }
11422         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11423         if (ret)
11424                 return ret;
11425
11426         /* RSS indirection table has been configured by user */
11427         if (rxfh_configured)
11428                 goto out;
11429
11430         /* Reinitialize the RSS indirection table according to the new RSS size */
11431         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11432                             GFP_KERNEL);
11433         if (!rss_indir)
11434                 return -ENOMEM;
11435
11436         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11437                 rss_indir[i] = i % kinfo->rss_size;
11438
11439         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11440         if (ret)
11441                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11442                         ret);
11443
11444         kfree(rss_indir);
11445
11446 out:
11447         if (!ret)
11448                 dev_info(&hdev->pdev->dev,
11449                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11450                          cur_rss_size, kinfo->rss_size,
11451                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11452
11453         return ret;
11454 }
11455
11456 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11457                               u32 *regs_num_64_bit)
11458 {
11459         struct hclge_desc desc;
11460         u32 total_num;
11461         int ret;
11462
11463         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11464         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11465         if (ret) {
11466                 dev_err(&hdev->pdev->dev,
11467                         "Query register number cmd failed, ret = %d.\n", ret);
11468                 return ret;
11469         }
11470
11471         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11472         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11473
11474         total_num = *regs_num_32_bit + *regs_num_64_bit;
11475         if (!total_num)
11476                 return -EINVAL;
11477
11478         return 0;
11479 }
11480
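/* Read the firmware-held 32-bit register block. In the reply, the first BD's
 * data area loses HCLGE_32_BIT_DESC_NODATA_LEN words to metadata, while every
 * following BD is consumed in full (all eight u32 words, including the
 * descriptor header area), hence cmd_num is computed from regs_num plus the
 * metadata words.
 */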
11481 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11482                                  void *data)
11483 {
11484 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11485 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11486
11487         struct hclge_desc *desc;
11488         u32 *reg_val = data;
11489         __le32 *desc_data;
11490         int nodata_num;
11491         int cmd_num;
11492         int i, k, n;
11493         int ret;
11494
11495         if (regs_num == 0)
11496                 return 0;
11497
11498         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11499         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11500                                HCLGE_32_BIT_REG_RTN_DATANUM);
11501         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11502         if (!desc)
11503                 return -ENOMEM;
11504
11505         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11506         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11507         if (ret) {
11508                 dev_err(&hdev->pdev->dev,
11509                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11510                 kfree(desc);
11511                 return ret;
11512         }
11513
11514         for (i = 0; i < cmd_num; i++) {
11515                 if (i == 0) {
11516                         desc_data = (__le32 *)(&desc[i].data[0]);
11517                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11518                 } else {
11519                         desc_data = (__le32 *)(&desc[i]);
11520                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11521                 }
11522                 for (k = 0; k < n; k++) {
11523                         *reg_val++ = le32_to_cpu(*desc_data++);
11524
11525                         regs_num--;
11526                         if (!regs_num)
11527                                 break;
11528                 }
11529         }
11530
11531         kfree(desc);
11532         return 0;
11533 }
11534
11535 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11536                                  void *data)
11537 {
11538 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11539 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11540
11541         struct hclge_desc *desc;
11542         u64 *reg_val = data;
11543         __le64 *desc_data;
11544         int nodata_len;
11545         int cmd_num;
11546         int i, k, n;
11547         int ret;
11548
11549         if (regs_num == 0)
11550                 return 0;
11551
11552         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11553         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11554                                HCLGE_64_BIT_REG_RTN_DATANUM);
11555         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11556         if (!desc)
11557                 return -ENOMEM;
11558
11559         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11560         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11561         if (ret) {
11562                 dev_err(&hdev->pdev->dev,
11563                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11564                 kfree(desc);
11565                 return ret;
11566         }
11567
11568         for (i = 0; i < cmd_num; i++) {
11569                 if (i == 0) {
11570                         desc_data = (__le64 *)(&desc[i].data[0]);
11571                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11572                 } else {
11573                         desc_data = (__le64 *)(&desc[i]);
11574                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11575                 }
11576                 for (k = 0; k < n; k++) {
11577                         *reg_val++ = le64_to_cpu(*desc_data++);
11578
11579                         regs_num--;
11580                         if (!regs_num)
11581                                 break;
11582                 }
11583         }
11584
11585         kfree(desc);
11586         return 0;
11587 }
11588
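/* Layout of the ethtool register dump: every register group is emitted in
 * units of REG_LEN_PER_LINE (four u32 words) and padded with SEPARATOR_VALUE
 * words, so the length reported by hclge_get_regs_len() matches what
 * hclge_get_regs() actually writes.
 */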
11589 #define MAX_SEPARATE_NUM        4
11590 #define SEPARATOR_VALUE         0xFDFCFBFA
11591 #define REG_NUM_PER_LINE        4
11592 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11593 #define REG_SEPARATOR_LINE      1
11594 #define REG_NUM_REMAIN_MASK     3
11595 #define BD_LIST_MAX_NUM         30
11596
11597 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11598 {
11599         int i;
11600
11601         /* initialize all command BDs except the last one */
11602         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11603                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11604                                            true);
11605                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11606         }
11607
11608         /* initialize the last command BD */
11609         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11610
11611         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11612 }
11613
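/* The BD-number reply carries one u32 count per DFX block; the offsets in
 * hclge_dfx_bd_offset_list index into the concatenated desc[].data arrays of
 * the reply.
 */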
11614 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11615                                     int *bd_num_list,
11616                                     u32 type_num)
11617 {
11618         u32 entries_per_desc, desc_index, index, offset, i;
11619         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11620         int ret;
11621
11622         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11623         if (ret) {
11624                 dev_err(&hdev->pdev->dev,
11625                         "Get dfx bd num fail, status is %d.\n", ret);
11626                 return ret;
11627         }
11628
11629         entries_per_desc = ARRAY_SIZE(desc[0].data);
11630         for (i = 0; i < type_num; i++) {
11631                 offset = hclge_dfx_bd_offset_list[i];
11632                 index = offset % entries_per_desc;
11633                 desc_index = offset / entries_per_desc;
11634                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11635         }
11636
11637         return ret;
11638 }
11639
11640 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11641                                   struct hclge_desc *desc_src, int bd_num,
11642                                   enum hclge_opcode_type cmd)
11643 {
11644         struct hclge_desc *desc = desc_src;
11645         int i, ret;
11646
11647         hclge_cmd_setup_basic_desc(desc, cmd, true);
11648         for (i = 0; i < bd_num - 1; i++) {
11649                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11650                 desc++;
11651                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11652         }
11653
11654         desc = desc_src;
11655         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11656         if (ret)
11657                 dev_err(&hdev->pdev->dev,
11658                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11659                         cmd, ret);
11660
11661         return ret;
11662 }
11663
11664 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11665                                     void *data)
11666 {
11667         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11668         struct hclge_desc *desc = desc_src;
11669         u32 *reg = data;
11670
11671         entries_per_desc = ARRAY_SIZE(desc->data);
11672         reg_num = entries_per_desc * bd_num;
11673         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11674         for (i = 0; i < reg_num; i++) {
11675                 index = i % entries_per_desc;
11676                 desc_index = i / entries_per_desc;
11677                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11678         }
11679         for (i = 0; i < separator_num; i++)
11680                 *reg++ = SEPARATOR_VALUE;
11681
11682         return reg_num + separator_num;
11683 }
11684
11685 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11686 {
11687         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11688         int data_len_per_desc, bd_num, i;
11689         int bd_num_list[BD_LIST_MAX_NUM];
11690         u32 data_len;
11691         int ret;
11692
11693         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11694         if (ret) {
11695                 dev_err(&hdev->pdev->dev,
11696                         "Get dfx reg bd num fail, status is %d.\n", ret);
11697                 return ret;
11698         }
11699
11700         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11701         *len = 0;
11702         for (i = 0; i < dfx_reg_type_num; i++) {
11703                 bd_num = bd_num_list[i];
11704                 data_len = data_len_per_desc * bd_num;
11705                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11706         }
11707
11708         return ret;
11709 }
11710
11711 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11712 {
11713         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11714         int bd_num, bd_num_max, buf_len, i;
11715         int bd_num_list[BD_LIST_MAX_NUM];
11716         struct hclge_desc *desc_src;
11717         u32 *reg = data;
11718         int ret;
11719
11720         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11721         if (ret) {
11722                 dev_err(&hdev->pdev->dev,
11723                         "Get dfx reg bd num fail, status is %d.\n", ret);
11724                 return ret;
11725         }
11726
11727         bd_num_max = bd_num_list[0];
11728         for (i = 1; i < dfx_reg_type_num; i++)
11729                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11730
11731         buf_len = sizeof(*desc_src) * bd_num_max;
11732         desc_src = kzalloc(buf_len, GFP_KERNEL);
11733         if (!desc_src)
11734                 return -ENOMEM;
11735
11736         for (i = 0; i < dfx_reg_type_num; i++) {
11737                 bd_num = bd_num_list[i];
11738                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11739                                              hclge_dfx_reg_opcode_list[i]);
11740                 if (ret) {
11741                         dev_err(&hdev->pdev->dev,
11742                                 "Get dfx reg fail, status is %d.\n", ret);
11743                         break;
11744                 }
11745
11746                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11747         }
11748
11749         kfree(desc_src);
11750         return ret;
11751 }
11752
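/* Dump the directly memory-mapped PF registers: cmdq, common, per-ring and
 * per-vector interrupt register groups, each padded with SEPARATOR_VALUE
 * words. Returns the number of u32 words written.
 */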
11753 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11754                               struct hnae3_knic_private_info *kinfo)
11755 {
11756 #define HCLGE_RING_REG_OFFSET           0x200
11757 #define HCLGE_RING_INT_REG_OFFSET       0x4
11758
11759         int i, j, reg_num, separator_num;
11760         int data_num_sum;
11761         u32 *reg = data;
11762
11763         /* fetch per-PF register values from the PF PCIe register space */
11764         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11765         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11766         for (i = 0; i < reg_num; i++)
11767                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11768         for (i = 0; i < separator_num; i++)
11769                 *reg++ = SEPARATOR_VALUE;
11770         data_num_sum = reg_num + separator_num;
11771
11772         reg_num = ARRAY_SIZE(common_reg_addr_list);
11773         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11774         for (i = 0; i < reg_num; i++)
11775                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11776         for (i = 0; i < separator_num; i++)
11777                 *reg++ = SEPARATOR_VALUE;
11778         data_num_sum += reg_num + separator_num;
11779
11780         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11781         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11782         for (j = 0; j < kinfo->num_tqps; j++) {
11783                 for (i = 0; i < reg_num; i++)
11784                         *reg++ = hclge_read_dev(&hdev->hw,
11785                                                 ring_reg_addr_list[i] +
11786                                                 HCLGE_RING_REG_OFFSET * j);
11787                 for (i = 0; i < separator_num; i++)
11788                         *reg++ = SEPARATOR_VALUE;
11789         }
11790         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11791
11792         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11793         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11794         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11795                 for (i = 0; i < reg_num; i++)
11796                         *reg++ = hclge_read_dev(&hdev->hw,
11797                                                 tqp_intr_reg_addr_list[i] +
11798                                                 HCLGE_RING_INT_REG_OFFSET * j);
11799                 for (i = 0; i < separator_num; i++)
11800                         *reg++ = SEPARATOR_VALUE;
11801         }
11802         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11803
11804         return data_num_sum;
11805 }
11806
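/* Size the ethtool register dump: each fixed-address group contributes its
 * line count plus a separator line, the ring and TQP-interrupt groups scale
 * with num_tqps and (num_msi_used - 1) respectively, and the firmware-queried
 * 32-bit, 64-bit and DFX regions are added on top.
 */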
11807 static int hclge_get_regs_len(struct hnae3_handle *handle)
11808 {
11809         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11810         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11811         struct hclge_vport *vport = hclge_get_vport(handle);
11812         struct hclge_dev *hdev = vport->back;
11813         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11814         int regs_lines_32_bit, regs_lines_64_bit;
11815         int ret;
11816
11817         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11818         if (ret) {
11819                 dev_err(&hdev->pdev->dev,
11820                         "Get register number failed, ret = %d.\n", ret);
11821                 return ret;
11822         }
11823
11824         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11825         if (ret) {
11826                 dev_err(&hdev->pdev->dev,
11827                         "Get dfx reg len failed, ret = %d.\n", ret);
11828                 return ret;
11829         }
11830
11831         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11832                 REG_SEPARATOR_LINE;
11833         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11834                 REG_SEPARATOR_LINE;
11835         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11836                 REG_SEPARATOR_LINE;
11837         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11838                 REG_SEPARATOR_LINE;
11839         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11840                 REG_SEPARATOR_LINE;
11841         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11842                 REG_SEPARATOR_LINE;
11843
11844         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11845                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11846                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11847 }
11848
11849 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11850                            void *data)
11851 {
11852         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11853         struct hclge_vport *vport = hclge_get_vport(handle);
11854         struct hclge_dev *hdev = vport->back;
11855         u32 regs_num_32_bit, regs_num_64_bit;
11856         int i, reg_num, separator_num, ret;
11857         u32 *reg = data;
11858
11859         *version = hdev->fw_version;
11860
11861         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11862         if (ret) {
11863                 dev_err(&hdev->pdev->dev,
11864                         "Get register number failed, ret = %d.\n", ret);
11865                 return;
11866         }
11867
11868         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11869
11870         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11871         if (ret) {
11872                 dev_err(&hdev->pdev->dev,
11873                         "Get 32 bit register failed, ret = %d.\n", ret);
11874                 return;
11875         }
11876         reg_num = regs_num_32_bit;
11877         reg += reg_num;
11878         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11879         for (i = 0; i < separator_num; i++)
11880                 *reg++ = SEPARATOR_VALUE;
11881
11882         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11883         if (ret) {
11884                 dev_err(&hdev->pdev->dev,
11885                         "Get 64 bit register failed, ret = %d.\n", ret);
11886                 return;
11887         }
11888         reg_num = regs_num_64_bit * 2;
11889         reg += reg_num;
11890         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11891         for (i = 0; i < separator_num; i++)
11892                 *reg++ = SEPARATOR_VALUE;
11893
11894         ret = hclge_get_dfx_reg(hdev, reg);
11895         if (ret)
11896                 dev_err(&hdev->pdev->dev,
11897                         "Get dfx register failed, ret = %d.\n", ret);
11898 }
11899
11900 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11901 {
11902         struct hclge_set_led_state_cmd *req;
11903         struct hclge_desc desc;
11904         int ret;
11905
11906         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11907
11908         req = (struct hclge_set_led_state_cmd *)desc.data;
11909         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11910                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11911
11912         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11913         if (ret)
11914                 dev_err(&hdev->pdev->dev,
11915                         "Send set led state cmd error, ret =%d\n", ret);
11916
11917         return ret;
11918 }
11919
11920 enum hclge_led_status {
11921         HCLGE_LED_OFF,
11922         HCLGE_LED_ON,
11923         HCLGE_LED_NO_CHANGE = 0xFF,
11924 };
11925
11926 static int hclge_set_led_id(struct hnae3_handle *handle,
11927                             enum ethtool_phys_id_state status)
11928 {
11929         struct hclge_vport *vport = hclge_get_vport(handle);
11930         struct hclge_dev *hdev = vport->back;
11931
11932         switch (status) {
11933         case ETHTOOL_ID_ACTIVE:
11934                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11935         case ETHTOOL_ID_INACTIVE:
11936                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11937         default:
11938                 return -EINVAL;
11939         }
11940 }
11941
11942 static void hclge_get_link_mode(struct hnae3_handle *handle,
11943                                 unsigned long *supported,
11944                                 unsigned long *advertising)
11945 {
11946         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11947         struct hclge_vport *vport = hclge_get_vport(handle);
11948         struct hclge_dev *hdev = vport->back;
11949         unsigned int idx = 0;
11950
11951         for (; idx < size; idx++) {
11952                 supported[idx] = hdev->hw.mac.supported[idx];
11953                 advertising[idx] = hdev->hw.mac.advertising[idx];
11954         }
11955 }
11956
11957 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11958 {
11959         struct hclge_vport *vport = hclge_get_vport(handle);
11960         struct hclge_dev *hdev = vport->back;
11961
11962         return hclge_config_gro(hdev, enable);
11963 }
11964
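/* Re-apply the promiscuous configuration when either the user-requested flags
 * or the overflow promisc flags have changed; presumably driven by the
 * periodic service task, like the other hclge_sync_* helpers.
 */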
11965 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11966 {
11967         struct hclge_vport *vport = &hdev->vport[0];
11968         struct hnae3_handle *handle = &vport->nic;
11969         u8 tmp_flags;
11970         int ret;
11971
11972         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11973                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11974                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11975         }
11976
11977         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11978                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11979                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11980                                              tmp_flags & HNAE3_MPE);
11981                 if (!ret) {
11982                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11983                         hclge_enable_vlan_filter(handle,
11984                                                  tmp_flags & HNAE3_VLAN_FLTR);
11985                 }
11986         }
11987 }
11988
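/* Ask the firmware whether an SFP/QSFP module is present; a non-zero word in
 * the reply means a module was detected.
 */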
11989 static bool hclge_module_existed(struct hclge_dev *hdev)
11990 {
11991         struct hclge_desc desc;
11992         u32 existed;
11993         int ret;
11994
11995         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11997         if (ret) {
11998                 dev_err(&hdev->pdev->dev,
11999                         "failed to get SFP exist state, ret = %d\n", ret);
12000                 return false;
12001         }
12002
12003         existed = le32_to_cpu(desc.data[0]);
12004
12005         return existed != 0;
12006 }
12007
12008 /* need 6 BDs (140 bytes in total) in one read
12009  * return the number of bytes actually read; 0 means the read failed.
12010  */
12011 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12012                                      u32 len, u8 *data)
12013 {
12014         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12015         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12016         u16 read_len;
12017         u16 copy_len;
12018         int ret;
12019         int i;
12020
12021         /* setup all 6 bds to read module eeprom info. */
12022         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12023                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12024                                            true);
12025
12026                 /* bd0~bd4 need next flag */
12027                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12028                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12029         }
12030
12031         /* setup bd0, this bd contains offset and read length. */
12032         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12033         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12034         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12035         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12036
12037         ret = hclge_cmd_send(&hdev->hw, desc, i);
12038         if (ret) {
12039                 dev_err(&hdev->pdev->dev,
12040                         "failed to get SFP eeprom info, ret = %d\n", ret);
12041                 return 0;
12042         }
12043
12044         /* copy sfp info from bd0 to out buffer. */
12045         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12046         memcpy(data, sfp_info_bd0->data, copy_len);
12047         read_len = copy_len;
12048
12049         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12050         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12051                 if (read_len >= len)
12052                         return read_len;
12053
12054                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12055                 memcpy(data + read_len, desc[i].data, copy_len);
12056                 read_len += copy_len;
12057         }
12058
12059         return read_len;
12060 }
12061
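/* ethtool module EEPROM backend: read in chunks, each call to
 * hclge_get_sfp_eeprom_info() returning at most HCLGE_SFP_INFO_MAX_LEN bytes
 * (apparently 140, per the comment above), until 'len' bytes have been copied
 * or a read fails.
 */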
12062 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12063                                    u32 len, u8 *data)
12064 {
12065         struct hclge_vport *vport = hclge_get_vport(handle);
12066         struct hclge_dev *hdev = vport->back;
12067         u32 read_len = 0;
12068         u16 data_len;
12069
12070         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12071                 return -EOPNOTSUPP;
12072
12073         if (!hclge_module_existed(hdev))
12074                 return -ENXIO;
12075
12076         while (read_len < len) {
12077                 data_len = hclge_get_sfp_eeprom_info(hdev,
12078                                                      offset + read_len,
12079                                                      len - read_len,
12080                                                      data + read_len);
12081                 if (!data_len)
12082                         return -EIO;
12083
12084                 read_len += data_len;
12085         }
12086
12087         return 0;
12088 }
12089
12090 static const struct hnae3_ae_ops hclge_ops = {
12091         .init_ae_dev = hclge_init_ae_dev,
12092         .uninit_ae_dev = hclge_uninit_ae_dev,
12093         .flr_prepare = hclge_flr_prepare,
12094         .flr_done = hclge_flr_done,
12095         .init_client_instance = hclge_init_client_instance,
12096         .uninit_client_instance = hclge_uninit_client_instance,
12097         .map_ring_to_vector = hclge_map_ring_to_vector,
12098         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12099         .get_vector = hclge_get_vector,
12100         .put_vector = hclge_put_vector,
12101         .set_promisc_mode = hclge_set_promisc_mode,
12102         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12103         .set_loopback = hclge_set_loopback,
12104         .start = hclge_ae_start,
12105         .stop = hclge_ae_stop,
12106         .client_start = hclge_client_start,
12107         .client_stop = hclge_client_stop,
12108         .get_status = hclge_get_status,
12109         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12110         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12111         .get_media_type = hclge_get_media_type,
12112         .check_port_speed = hclge_check_port_speed,
12113         .get_fec = hclge_get_fec,
12114         .set_fec = hclge_set_fec,
12115         .get_rss_key_size = hclge_get_rss_key_size,
12116         .get_rss = hclge_get_rss,
12117         .set_rss = hclge_set_rss,
12118         .set_rss_tuple = hclge_set_rss_tuple,
12119         .get_rss_tuple = hclge_get_rss_tuple,
12120         .get_tc_size = hclge_get_tc_size,
12121         .get_mac_addr = hclge_get_mac_addr,
12122         .set_mac_addr = hclge_set_mac_addr,
12123         .do_ioctl = hclge_do_ioctl,
12124         .add_uc_addr = hclge_add_uc_addr,
12125         .rm_uc_addr = hclge_rm_uc_addr,
12126         .add_mc_addr = hclge_add_mc_addr,
12127         .rm_mc_addr = hclge_rm_mc_addr,
12128         .set_autoneg = hclge_set_autoneg,
12129         .get_autoneg = hclge_get_autoneg,
12130         .restart_autoneg = hclge_restart_autoneg,
12131         .halt_autoneg = hclge_halt_autoneg,
12132         .get_pauseparam = hclge_get_pauseparam,
12133         .set_pauseparam = hclge_set_pauseparam,
12134         .set_mtu = hclge_set_mtu,
12135         .reset_queue = hclge_reset_tqp,
12136         .get_stats = hclge_get_stats,
12137         .get_mac_stats = hclge_get_mac_stat,
12138         .update_stats = hclge_update_stats,
12139         .get_strings = hclge_get_strings,
12140         .get_sset_count = hclge_get_sset_count,
12141         .get_fw_version = hclge_get_fw_version,
12142         .get_mdix_mode = hclge_get_mdix_mode,
12143         .enable_vlan_filter = hclge_enable_vlan_filter,
12144         .set_vlan_filter = hclge_set_vlan_filter,
12145         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12146         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12147         .reset_event = hclge_reset_event,
12148         .get_reset_level = hclge_get_reset_level,
12149         .set_default_reset_request = hclge_set_def_reset_request,
12150         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12151         .set_channels = hclge_set_channels,
12152         .get_channels = hclge_get_channels,
12153         .get_regs_len = hclge_get_regs_len,
12154         .get_regs = hclge_get_regs,
12155         .set_led_id = hclge_set_led_id,
12156         .get_link_mode = hclge_get_link_mode,
12157         .add_fd_entry = hclge_add_fd_entry,
12158         .del_fd_entry = hclge_del_fd_entry,
12159         .del_all_fd_entries = hclge_del_all_fd_entries,
12160         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12161         .get_fd_rule_info = hclge_get_fd_rule_info,
12162         .get_fd_all_rules = hclge_get_all_rules,
12163         .enable_fd = hclge_enable_fd,
12164         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12165         .dbg_run_cmd = hclge_dbg_run_cmd,
12166         .dbg_read_cmd = hclge_dbg_read_cmd,
12167         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12168         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12169         .ae_dev_resetting = hclge_ae_dev_resetting,
12170         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12171         .set_gro_en = hclge_gro_en,
12172         .get_global_queue_id = hclge_covert_handle_qid_global,
12173         .set_timer_task = hclge_set_timer_task,
12174         .mac_connect_phy = hclge_mac_connect_phy,
12175         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12176         .get_vf_config = hclge_get_vf_config,
12177         .set_vf_link_state = hclge_set_vf_link_state,
12178         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12179         .set_vf_trust = hclge_set_vf_trust,
12180         .set_vf_rate = hclge_set_vf_rate,
12181         .set_vf_mac = hclge_set_vf_mac,
12182         .get_module_eeprom = hclge_get_module_eeprom,
12183         .get_cmdq_stat = hclge_get_cmdq_stat,
12184         .add_cls_flower = hclge_add_cls_flower,
12185         .del_cls_flower = hclge_del_cls_flower,
12186         .cls_flower_active = hclge_is_cls_flower_active,
12187         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12188         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12189 };
12190
12191 static struct hnae3_ae_algo ae_algo = {
12192         .ops = &hclge_ops,
12193         .pdev_id_table = ae_algo_pci_tbl,
12194 };
12195
12196 static int hclge_init(void)
12197 {
12198         pr_info("%s is initializing\n", HCLGE_NAME);
12199
12200         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12201         if (!hclge_wq) {
12202                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12203                 return -ENOMEM;
12204         }
12205
12206         hnae3_register_ae_algo(&ae_algo);
12207
12208         return 0;
12209 }
12210
12211 static void hclge_exit(void)
12212 {
12213         hnae3_unregister_ae_algo(&ae_algo);
12214         destroy_workqueue(hclge_wq);
12215 }
12216 module_init(hclge_init);
12217 module_exit(hclge_exit);
12218
12219 MODULE_LICENSE("GPL");
12220 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12221 MODULE_DESCRIPTION("HCLGE Driver");
12222 MODULE_VERSION(HCLGE_MOD_VERSION);