// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
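/* The two macros above are typically combined to read one named counter out
 * of struct hclge_mac_stats by byte offset, e.g. (illustrative only):
 *
 *   u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *                   HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */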

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,
        },
};

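/* Default RSS hash key. The byte sequence appears to be the well-known
 * 40-byte Toeplitz example key that many NICs use as their default.
 */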
static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

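/* The two arrays below appear to be kept in index lock-step: entry i of
 * hclge_dfx_bd_offset_list gives the BD-number offset that corresponds to
 * the DFX register query opcode at entry i of hclge_dfx_reg_opcode_list.
 */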
static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

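/* Flow director meta data fields: each entry is presumably
 * { field id, key width in bits }.
 */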
static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},
        { IP_FRAGEMENT, 1},
        { ROCE_TYPE, 1},
        { NEXT_KEY, 5},
        { VLAN_NUMBER, 2},
        { SRC_VPORT, 12},
        { DST_VPORT, 12},
        { TUNNEL_PACKET, 1},
};

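/* Flow director tuple fields. Each entry appears to describe
 * { tuple id, key width in bits, key build option,
 *   offset of the value in struct hclge_fd_rule,
 *   offset of the mask in struct hclge_fd_rule },
 * with -1 used for tuples that are not taken from the rule directly.
 */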
static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
        { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
        { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
        { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
        { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
        { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
        { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
        { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
        { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
        { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
        { INNER_DST_MAC, 48, KEY_OPT_MAC,
          offsetof(struct hclge_fd_rule, tuples.dst_mac),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
        { INNER_SRC_MAC, 48, KEY_OPT_MAC,
          offsetof(struct hclge_fd_rule, tuples.src_mac),
          offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
        { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
          offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
        { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
        { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.ether_proto),
          offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
        { INNER_L2_RSV, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.l2_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
        { INNER_IP_TOS, 8, KEY_OPT_U8,
          offsetof(struct hclge_fd_rule, tuples.ip_tos),
          offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
        { INNER_IP_PROTO, 8, KEY_OPT_U8,
          offsetof(struct hclge_fd_rule, tuples.ip_proto),
          offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
        { INNER_SRC_IP, 32, KEY_OPT_IP,
          offsetof(struct hclge_fd_rule, tuples.src_ip),
          offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
        { INNER_DST_IP, 32, KEY_OPT_IP,
          offsetof(struct hclge_fd_rule, tuples.dst_ip),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
        { INNER_L3_RSV, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.l3_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
        { INNER_SRC_PORT, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.src_port),
          offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
        { INNER_DST_PORT, 16, KEY_OPT_LE16,
          offsetof(struct hclge_fd_rule, tuples.dst_port),
          offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
        { INNER_L4_RSV, 32, KEY_OPT_LE32,
          offsetof(struct hclge_fd_rule, tuples.l4_user_def),
          offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

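        /* The computation below is 1 + DIV_ROUND_UP(reg_num - 3, 4):
         * presumably the first descriptor carries the command head plus the
         * first counters, and each following descriptor carries four more.
         */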
        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

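/* Update MAC statistics, choosing the method by firmware capability: if the
 * register-number query succeeds, use the variable-length
 * hclge_mac_update_stats_complete() path; if it returns -EOPNOTSUPP, fall
 * back to the fixed-size hclge_mac_update_stats_defective() path.
 */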
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);
        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

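/* Refresh the per-queue packet counters: one RX stats query and one TX stats
 * query command is issued per TQP, and the returned packet counts are
 * accumulated into tqp->tqp_stats.
 */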
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each tqp has both a TX and an RX queue */
        return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only GE mode supports it
         * serdes: all MAC modes support it, including GE/XGE/LGE/CGE
         * phy: only supported when a PHY device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
                     hdev->hw.mac.phydev->drv->set_loopback) ||
                    hnae3_dev_phy_imp_supported(hdev)) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = (char *)data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK       0xF

        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);
}

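/* Query the resources assigned to this PF: number of TQPs, packet/TX/DV
 * buffer sizes and the NIC (and, if supported, RoCE) MSI-X vector counts,
 * and cache them in hdev.
 */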
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = le16_to_cpu(req->tqp_num) +
                         le16_to_cpu(req->ext_tqp_num);
        hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "only %u msi resources available, not enough for pf(min:2).\n",
                        hdev->num_nic_msi);
                return -EINVAL;
        }

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msi =
                        le16_to_cpu(req->pf_intr_vector_number_roce);

                /* The PF should have both NIC vectors and RoCE vectors;
                 * NIC vectors are queued before RoCE vectors.
                 */
                hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
        } else {
                hdev->num_msi = hdev->num_nic_msi;
        }

        return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        case 8:
                *speed = HCLGE_MAC_SPEED_200G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        case HCLGE_MAC_SPEED_200G:
                speed_bit = HCLGE_SUPPORT_200G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(
                        ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
                        mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
                                 mac->supported);
}

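/* Map the current MAC speed to the FEC modes it can use: BASE-R for 10G/40G,
 * BASE-R or RS for 25G/50G, RS for 100G/200G, and no FEC otherwise (auto is
 * allowed whenever any FEC mode is).
 */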
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
        case HCLGE_MAC_SPEED_200G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        if (hnae3_dev_pause_supported(hdev))
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        if (hnae3_dev_pause_supported(hdev))
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u16 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to support all speed for GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        if (hnae3_dev_pause_supported(hdev)) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                return HCLGE_MAC_SPEED_200G;

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;
}

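/* Unpack the configuration parameters returned in the two query descriptors:
 * TC/queue sizing, media type, RX buffer length, speed ability and the
 * default MAC address, whose low 32 bits come from param[2] and high 16 bits
 * from param[3].
 */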
1281 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1282 {
1283 #define HCLGE_TX_SPARE_SIZE_UNIT                4096
1284 #define SPEED_ABILITY_EXT_SHIFT                 8
1285
1286         struct hclge_cfg_param_cmd *req;
1287         u64 mac_addr_tmp_high;
1288         u16 speed_ability_ext;
1289         u64 mac_addr_tmp;
1290         unsigned int i;
1291
1292         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1293
1294         /* get the configuration */
1295         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1297         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1298                                             HCLGE_CFG_TQP_DESC_N_M,
1299                                             HCLGE_CFG_TQP_DESC_N_S);
1300
1301         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302                                         HCLGE_CFG_PHY_ADDR_M,
1303                                         HCLGE_CFG_PHY_ADDR_S);
1304         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1305                                           HCLGE_CFG_MEDIA_TP_M,
1306                                           HCLGE_CFG_MEDIA_TP_S);
1307         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308                                           HCLGE_CFG_RX_BUF_LEN_M,
1309                                           HCLGE_CFG_RX_BUF_LEN_S);
1310         /* get mac_address */
1311         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1312         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1313                                             HCLGE_CFG_MAC_ADDR_H_M,
1314                                             HCLGE_CFG_MAC_ADDR_H_S);
1315
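             /* the double shift is equivalent to a left shift by 32: it places
              * the upper 16 bits of the MAC address above the lower 32 bits
              */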
1316         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1317
1318         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1319                                              HCLGE_CFG_DEFAULT_SPEED_M,
1320                                              HCLGE_CFG_DEFAULT_SPEED_S);
1321         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1322                                                HCLGE_CFG_RSS_SIZE_M,
1323                                                HCLGE_CFG_RSS_SIZE_S);
1324
1325         for (i = 0; i < ETH_ALEN; i++)
1326                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1327
1328         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1329         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1330
1331         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1332                                              HCLGE_CFG_SPEED_ABILITY_M,
1333                                              HCLGE_CFG_SPEED_ABILITY_S);
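             /* the extended ability bits are carried in a separate field and are
              * merged into the upper byte of speed_ability
              */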
1334         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1335                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1336                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1337         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1338
1339         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1340                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1341                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1342
1343         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1344                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1345                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1346         if (!cfg->umv_space)
1347                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1348
1349         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1350                                                HCLGE_CFG_PF_RSS_SIZE_M,
1351                                                HCLGE_CFG_PF_RSS_SIZE_S);
1352
1353         /* HCLGE_CFG_PF_RSS_SIZE_M stores the PF max rss size as a power
1354          * of 2 (i.e. the exponent) instead of the size itself, which is
1355          * more flexible for future changes and expansions.
1356          * A PF field of 0 means the PF shares the VF max rss size field
1357          * HCLGE_CFG_RSS_SIZE_S, so fall back to vf_rss_size_max in that
1358          * case.
1359          */
1360         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1361                                1U << cfg->pf_rss_size_max :
1362                                cfg->vf_rss_size_max;
1363
1364         /* The unit of the tx spare buffer size queried from configuration
1365          * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1366          * needed here.
1367          */
1368         cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1369                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1370                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1371         cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1372 }
1373
1374 /* hclge_get_cfg: query the static parameters from flash
1375  * @hdev: pointer to struct hclge_dev
1376  * @hcfg: the config structure to be filled
1377  */
1378 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1379 {
1380         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1381         struct hclge_cfg_param_cmd *req;
1382         unsigned int i;
1383         int ret;
1384
1385         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1386                 u32 offset = 0;
1387
1388                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1389                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1390                                            true);
1391                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1392                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1393                 /* Len should be in units of 4 bytes when sent to hardware */
1394                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1395                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1396                 req->offset = cpu_to_le32(offset);
1397         }
1398
1399         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1400         if (ret) {
1401                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1402                 return ret;
1403         }
1404
1405         hclge_parse_cfg(hcfg, desc);
1406
1407         return 0;
1408 }
1409
1410 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1411 {
1412 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1413
1414         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1415
1416         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1417         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1418         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1419         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1420         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1421         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1422         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1423 }
1424
1425 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1426                                   struct hclge_desc *desc)
1427 {
1428         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1429         struct hclge_dev_specs_0_cmd *req0;
1430         struct hclge_dev_specs_1_cmd *req1;
1431
1432         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1433         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1434
1435         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1436         ae_dev->dev_specs.rss_ind_tbl_size =
1437                 le16_to_cpu(req0->rss_ind_tbl_size);
1438         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1439         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1440         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1441         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1442         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1443         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1444 }
1445
1446 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1447 {
1448         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1449
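             /* fall back to the default for any spec the firmware reports as zero */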
1450         if (!dev_specs->max_non_tso_bd_num)
1451                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1452         if (!dev_specs->rss_ind_tbl_size)
1453                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1454         if (!dev_specs->rss_key_size)
1455                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1456         if (!dev_specs->max_tm_rate)
1457                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1458         if (!dev_specs->max_qset_num)
1459                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1460         if (!dev_specs->max_int_gl)
1461                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1462         if (!dev_specs->max_frm_size)
1463                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1464 }
1465
1466 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1467 {
1468         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1469         int ret;
1470         int i;
1471
1472         /* set default specifications as devices lower than version V3 do not
1473          * support querying specifications from firmware.
1474          */
1475         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1476                 hclge_set_default_dev_specs(hdev);
1477                 return 0;
1478         }
1479
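             /* chain all but the last descriptor by setting the NEXT flag; the
              * last descriptor terminates the command chain
              */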
1480         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1481                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1482                                            true);
1483                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1484         }
1485         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1486
1487         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1488         if (ret)
1489                 return ret;
1490
1491         hclge_parse_dev_specs(hdev, desc);
1492         hclge_check_dev_specs(hdev);
1493
1494         return 0;
1495 }
1496
1497 static int hclge_get_cap(struct hclge_dev *hdev)
1498 {
1499         int ret;
1500
1501         ret = hclge_query_function_status(hdev);
1502         if (ret) {
1503                 dev_err(&hdev->pdev->dev,
1504                         "query function status error %d.\n", ret);
1505                 return ret;
1506         }
1507
1508         /* get pf resource */
1509         return hclge_query_pf_resource(hdev);
1510 }
1511
1512 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1513 {
1514 #define HCLGE_MIN_TX_DESC       64
1515 #define HCLGE_MIN_RX_DESC       64
1516
1517         if (!is_kdump_kernel())
1518                 return;
1519
1520         dev_info(&hdev->pdev->dev,
1521                  "Running kdump kernel. Using minimal resources\n");
1522
1523         /* the minimum number of queue pairs equals the number of vports */
1524         hdev->num_tqps = hdev->num_req_vfs + 1;
1525         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1526         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1527 }
1528
1529 static int hclge_configure(struct hclge_dev *hdev)
1530 {
1531         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1532         struct hclge_cfg cfg;
1533         unsigned int i;
1534         int ret;
1535
1536         ret = hclge_get_cfg(hdev, &cfg);
1537         if (ret)
1538                 return ret;
1539
1540         hdev->base_tqp_pid = 0;
1541         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1542         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1543         hdev->rx_buf_len = cfg.rx_buf_len;
1544         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1545         hdev->hw.mac.media_type = cfg.media_type;
1546         hdev->hw.mac.phy_addr = cfg.phy_addr;
1547         hdev->num_tx_desc = cfg.tqp_desc_num;
1548         hdev->num_rx_desc = cfg.tqp_desc_num;
1549         hdev->tm_info.num_pg = 1;
1550         hdev->tc_max = cfg.tc_num;
1551         hdev->tm_info.hw_pfc_map = 0;
1552         hdev->wanted_umv_size = cfg.umv_space;
1553         hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1554         hdev->gro_en = true;
1555         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1556                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557
1558         if (hnae3_dev_fd_supported(hdev)) {
1559                 hdev->fd_en = true;
1560                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1561         }
1562
1563         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564         if (ret) {
1565                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1566                         cfg.default_speed, ret);
1567                 return ret;
1568         }
1569
1570         hclge_parse_link_mode(hdev, cfg.speed_ability);
1571
1572         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573
1574         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1575             (hdev->tc_max < 1)) {
1576                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1577                          hdev->tc_max);
1578                 hdev->tc_max = 1;
1579         }
1580
1581         /* Dev does not support DCB */
1582         if (!hnae3_dev_dcb_supported(hdev)) {
1583                 hdev->tc_max = 1;
1584                 hdev->pfc_max = 0;
1585         } else {
1586                 hdev->pfc_max = hdev->tc_max;
1587         }
1588
1589         hdev->tm_info.num_tc = 1;
1590
1591         /* Currently non-contiguous TCs are not supported */
1592         for (i = 0; i < hdev->tm_info.num_tc; i++)
1593                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594
1595         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596
1597         hclge_init_kdump_kernel_config(hdev);
1598
1599         /* Set the initial affinity based on the PCI function number */
1600         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1601         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1602         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1603                         &hdev->affinity_mask);
1604
1605         return ret;
1606 }
1607
1608 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1609                             u16 tso_mss_max)
1610 {
1611         struct hclge_cfg_tso_status_cmd *req;
1612         struct hclge_desc desc;
1613
1614         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1615
1616         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1617         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1618         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1619
1620         return hclge_cmd_send(&hdev->hw, &desc, 1);
1621 }
1622
1623 static int hclge_config_gro(struct hclge_dev *hdev)
1624 {
1625         struct hclge_cfg_gro_status_cmd *req;
1626         struct hclge_desc desc;
1627         int ret;
1628
1629         if (!hnae3_dev_gro_supported(hdev))
1630                 return 0;
1631
1632         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1633         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1634
1635         req->gro_en = hdev->gro_en ? 1 : 0;
1636
1637         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1638         if (ret)
1639                 dev_err(&hdev->pdev->dev,
1640                         "GRO hardware config cmd failed, ret = %d\n", ret);
1641
1642         return ret;
1643 }
1644
1645 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1646 {
1647         struct hclge_tqp *tqp;
1648         int i;
1649
1650         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1651                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1652         if (!hdev->htqp)
1653                 return -ENOMEM;
1654
1655         tqp = hdev->htqp;
1656
1657         for (i = 0; i < hdev->num_tqps; i++) {
1658                 tqp->dev = &hdev->pdev->dev;
1659                 tqp->index = i;
1660
1661                 tqp->q.ae_algo = &ae_algo;
1662                 tqp->q.buf_size = hdev->rx_buf_len;
1663                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1664                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1665
1666                 /* need an extended offset to configure queues >=
1667                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1668                  */
1669                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1670                         tqp->q.io_base = hdev->hw.io_base +
1671                                          HCLGE_TQP_REG_OFFSET +
1672                                          i * HCLGE_TQP_REG_SIZE;
1673                 else
1674                         tqp->q.io_base = hdev->hw.io_base +
1675                                          HCLGE_TQP_REG_OFFSET +
1676                                          HCLGE_TQP_EXT_REG_OFFSET +
1677                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1678                                          HCLGE_TQP_REG_SIZE;
1679
1680                 tqp++;
1681         }
1682
1683         return 0;
1684 }
1685
1686 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1687                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1688 {
1689         struct hclge_tqp_map_cmd *req;
1690         struct hclge_desc desc;
1691         int ret;
1692
1693         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1694
1695         req = (struct hclge_tqp_map_cmd *)desc.data;
1696         req->tqp_id = cpu_to_le16(tqp_pid);
1697         req->tqp_vf = func_id;
1698         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1699         if (!is_pf)
1700                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1701         req->tqp_vid = cpu_to_le16(tqp_vid);
1702
1703         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704         if (ret)
1705                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1706
1707         return ret;
1708 }
1709
1710 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1711 {
1712         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1713         struct hclge_dev *hdev = vport->back;
1714         int i, alloced;
1715
1716         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1717              alloced < num_tqps; i++) {
1718                 if (!hdev->htqp[i].alloced) {
1719                         hdev->htqp[i].q.handle = &vport->nic;
1720                         hdev->htqp[i].q.tqp_index = alloced;
1721                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1722                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1723                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1724                         hdev->htqp[i].alloced = true;
1725                         alloced++;
1726                 }
1727         }
1728         vport->alloc_tqps = alloced;
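             /* rss_size is bounded by the PF max rss size and by the TQPs
              * available per TC
              */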
1729         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1730                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1731
1732         /* ensure a one-to-one mapping between irq and queue by default */
1733         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1734                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1735
1736         return 0;
1737 }
1738
1739 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1740                             u16 num_tx_desc, u16 num_rx_desc)
1741
1742 {
1743         struct hnae3_handle *nic = &vport->nic;
1744         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1745         struct hclge_dev *hdev = vport->back;
1746         int ret;
1747
1748         kinfo->num_tx_desc = num_tx_desc;
1749         kinfo->num_rx_desc = num_rx_desc;
1750
1751         kinfo->rx_buf_len = hdev->rx_buf_len;
1752         kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753
1754         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1755                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1756         if (!kinfo->tqp)
1757                 return -ENOMEM;
1758
1759         ret = hclge_assign_tqp(vport, num_tqps);
1760         if (ret)
1761                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1762
1763         return ret;
1764 }
1765
1766 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1767                                   struct hclge_vport *vport)
1768 {
1769         struct hnae3_handle *nic = &vport->nic;
1770         struct hnae3_knic_private_info *kinfo;
1771         u16 i;
1772
1773         kinfo = &nic->kinfo;
1774         for (i = 0; i < vport->alloc_tqps; i++) {
1775                 struct hclge_tqp *q =
1776                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1777                 bool is_pf;
1778                 int ret;
1779
1780                 is_pf = !(vport->vport_id);
1781                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1782                                              i, is_pf);
1783                 if (ret)
1784                         return ret;
1785         }
1786
1787         return 0;
1788 }
1789
1790 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 {
1792         struct hclge_vport *vport = hdev->vport;
1793         u16 i, num_vport;
1794
1795         num_vport = hdev->num_req_vfs + 1;
1796         for (i = 0; i < num_vport; i++) {
1797                 int ret;
1798
1799                 ret = hclge_map_tqp_to_vport(hdev, vport);
1800                 if (ret)
1801                         return ret;
1802
1803                 vport++;
1804         }
1805
1806         return 0;
1807 }
1808
1809 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1810 {
1811         struct hnae3_handle *nic = &vport->nic;
1812         struct hclge_dev *hdev = vport->back;
1813         int ret;
1814
1815         nic->pdev = hdev->pdev;
1816         nic->ae_algo = &ae_algo;
1817         nic->numa_node_mask = hdev->numa_node_mask;
1818         nic->kinfo.io_base = hdev->hw.io_base;
1819
1820         ret = hclge_knic_setup(vport, num_tqps,
1821                                hdev->num_tx_desc, hdev->num_rx_desc);
1822         if (ret)
1823                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1824
1825         return ret;
1826 }
1827
1828 static int hclge_alloc_vport(struct hclge_dev *hdev)
1829 {
1830         struct pci_dev *pdev = hdev->pdev;
1831         struct hclge_vport *vport;
1832         u32 tqp_main_vport;
1833         u32 tqp_per_vport;
1834         int num_vport, i;
1835         int ret;
1836
1837         /* We need to alloc a vport for the main NIC of the PF */
1838         num_vport = hdev->num_req_vfs + 1;
1839
1840         if (hdev->num_tqps < num_vport) {
1841                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1842                         hdev->num_tqps, num_vport);
1843                 return -EINVAL;
1844         }
1845
1846         /* Alloc the same number of TQPs for every vport */
1847         tqp_per_vport = hdev->num_tqps / num_vport;
1848         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1849
1850         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1851                              GFP_KERNEL);
1852         if (!vport)
1853                 return -ENOMEM;
1854
1855         hdev->vport = vport;
1856         hdev->num_alloc_vport = num_vport;
1857
1858         if (IS_ENABLED(CONFIG_PCI_IOV))
1859                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1860
1861         for (i = 0; i < num_vport; i++) {
1862                 vport->back = hdev;
1863                 vport->vport_id = i;
1864                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1865                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1866                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1867                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1868                 vport->req_vlan_fltr_en = true;
1869                 INIT_LIST_HEAD(&vport->vlan_list);
1870                 INIT_LIST_HEAD(&vport->uc_mac_list);
1871                 INIT_LIST_HEAD(&vport->mc_mac_list);
1872                 spin_lock_init(&vport->mac_list_lock);
1873
1874                 if (i == 0)
1875                         ret = hclge_vport_setup(vport, tqp_main_vport);
1876                 else
1877                         ret = hclge_vport_setup(vport, tqp_per_vport);
1878                 if (ret) {
1879                         dev_err(&pdev->dev,
1880                                 "vport setup failed for vport %d, %d\n",
1881                                 i, ret);
1882                         return ret;
1883                 }
1884
1885                 vport++;
1886         }
1887
1888         return 0;
1889 }
1890
1891 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1892                                     struct hclge_pkt_buf_alloc *buf_alloc)
1893 {
1894 /* TX buffer size is in units of 128 bytes */
1895 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1896 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1897         struct hclge_tx_buff_alloc_cmd *req;
1898         struct hclge_desc desc;
1899         int ret;
1900         u8 i;
1901
1902         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1903
1904         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1905         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1906                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1907
1908                 req->tx_pkt_buff[i] =
1909                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1910                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1911         }
1912
1913         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1914         if (ret)
1915                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1916                         ret);
1917
1918         return ret;
1919 }
1920
1921 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1922                                  struct hclge_pkt_buf_alloc *buf_alloc)
1923 {
1924         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1925
1926         if (ret)
1927                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1928
1929         return ret;
1930 }
1931
1932 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1933 {
1934         unsigned int i;
1935         u32 cnt = 0;
1936
1937         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1938                 if (hdev->hw_tc_map & BIT(i))
1939                         cnt++;
1940         return cnt;
1941 }
1942
1943 /* Get the number of PFC-enabled TCs that have a private buffer */
1944 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1945                                   struct hclge_pkt_buf_alloc *buf_alloc)
1946 {
1947         struct hclge_priv_buf *priv;
1948         unsigned int i;
1949         int cnt = 0;
1950
1951         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1952                 priv = &buf_alloc->priv_buf[i];
1953                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1954                     priv->enable)
1955                         cnt++;
1956         }
1957
1958         return cnt;
1959 }
1960
1961 /* Get the number of PFC-disabled TCs that have a private buffer */
1962 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1963                                      struct hclge_pkt_buf_alloc *buf_alloc)
1964 {
1965         struct hclge_priv_buf *priv;
1966         unsigned int i;
1967         int cnt = 0;
1968
1969         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1970                 priv = &buf_alloc->priv_buf[i];
1971                 if (hdev->hw_tc_map & BIT(i) &&
1972                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1973                     priv->enable)
1974                         cnt++;
1975         }
1976
1977         return cnt;
1978 }
1979
1980 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1981 {
1982         struct hclge_priv_buf *priv;
1983         u32 rx_priv = 0;
1984         int i;
1985
1986         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1987                 priv = &buf_alloc->priv_buf[i];
1988                 if (priv->enable)
1989                         rx_priv += priv->buf_size;
1990         }
1991         return rx_priv;
1992 }
1993
1994 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1995 {
1996         u32 i, total_tx_size = 0;
1997
1998         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1999                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2000
2001         return total_tx_size;
2002 }
2003
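     /* check whether the rx buffer left after the per-TC private buffers is
      * large enough for the shared buffer; if so, carve it up and set the
      * shared-buffer watermarks and per-TC thresholds
      */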
2004 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2005                                 struct hclge_pkt_buf_alloc *buf_alloc,
2006                                 u32 rx_all)
2007 {
2008         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2009         u32 tc_num = hclge_get_tc_num(hdev);
2010         u32 shared_buf, aligned_mps;
2011         u32 rx_priv;
2012         int i;
2013
2014         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2015
2016         if (hnae3_dev_dcb_supported(hdev))
2017                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2018                                         hdev->dv_buf_size;
2019         else
2020                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2021                                         + hdev->dv_buf_size;
2022
2023         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2024         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2025                              HCLGE_BUF_SIZE_UNIT);
2026
2027         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2028         if (rx_all < rx_priv + shared_std)
2029                 return false;
2030
2031         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2032         buf_alloc->s_buf.buf_size = shared_buf;
2033         if (hnae3_dev_dcb_supported(hdev)) {
2034                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2035                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2036                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2037                                   HCLGE_BUF_SIZE_UNIT);
2038         } else {
2039                 buf_alloc->s_buf.self.high = aligned_mps +
2040                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2041                 buf_alloc->s_buf.self.low = aligned_mps;
2042         }
2043
2044         if (hnae3_dev_dcb_supported(hdev)) {
2045                 hi_thrd = shared_buf - hdev->dv_buf_size;
2046
2047                 if (tc_num <= NEED_RESERVE_TC_NUM)
2048                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2049                                         / BUF_MAX_PERCENT;
2050
2051                 if (tc_num)
2052                         hi_thrd = hi_thrd / tc_num;
2053
2054                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2055                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2056                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2057         } else {
2058                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2059                 lo_thrd = aligned_mps;
2060         }
2061
2062         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2063                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2064                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2065         }
2066
2067         return true;
2068 }
2069
2070 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2071                                 struct hclge_pkt_buf_alloc *buf_alloc)
2072 {
2073         u32 i, total_size;
2074
2075         total_size = hdev->pkt_buf_size;
2076
2077         /* alloc tx buffers for all enabled TCs */
2078         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2079                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2080
2081                 if (hdev->hw_tc_map & BIT(i)) {
2082                         if (total_size < hdev->tx_buf_size)
2083                                 return -ENOMEM;
2084
2085                         priv->tx_buf_size = hdev->tx_buf_size;
2086                 } else {
2087                         priv->tx_buf_size = 0;
2088                 }
2089
2090                 total_size -= priv->tx_buf_size;
2091         }
2092
2093         return 0;
2094 }
2095
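     /* assign every enabled TC a private rx buffer sized from its watermarks
      * ("max" selects the larger watermark scheme), then verify the remaining
      * buffer can still hold the shared buffer
      */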
2096 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2097                                   struct hclge_pkt_buf_alloc *buf_alloc)
2098 {
2099         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2100         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2101         unsigned int i;
2102
2103         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2104                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2105
2106                 priv->enable = 0;
2107                 priv->wl.low = 0;
2108                 priv->wl.high = 0;
2109                 priv->buf_size = 0;
2110
2111                 if (!(hdev->hw_tc_map & BIT(i)))
2112                         continue;
2113
2114                 priv->enable = 1;
2115
2116                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2117                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2118                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2119                                                 HCLGE_BUF_SIZE_UNIT);
2120                 } else {
2121                         priv->wl.low = 0;
2122                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2123                                         aligned_mps;
2124                 }
2125
2126                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2127         }
2128
2129         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2130 }
2131
2132 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2133                                           struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2136         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2137         int i;
2138
2139         /* let the last one be cleared first */
2140         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2141                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2142                 unsigned int mask = BIT((unsigned int)i);
2143
2144                 if (hdev->hw_tc_map & mask &&
2145                     !(hdev->tm_info.hw_pfc_map & mask)) {
2146                         /* Clear the private buffer of a TC without PFC */
2147                         priv->wl.low = 0;
2148                         priv->wl.high = 0;
2149                         priv->buf_size = 0;
2150                         priv->enable = 0;
2151                         no_pfc_priv_num--;
2152                 }
2153
2154                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2155                     no_pfc_priv_num == 0)
2156                         break;
2157         }
2158
2159         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2160 }
2161
2162 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2163                                         struct hclge_pkt_buf_alloc *buf_alloc)
2164 {
2165         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2166         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2167         int i;
2168
2169         /* let the last one be cleared first */
2170         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2171                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2172                 unsigned int mask = BIT((unsigned int)i);
2173
2174                 if (hdev->hw_tc_map & mask &&
2175                     hdev->tm_info.hw_pfc_map & mask) {
2176                         /* Reduce the number of PFC TCs with a private buffer */
2177                         priv->wl.low = 0;
2178                         priv->enable = 0;
2179                         priv->wl.high = 0;
2180                         priv->buf_size = 0;
2181                         pfc_priv_num--;
2182                 }
2183
2184                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2185                     pfc_priv_num == 0)
2186                         break;
2187         }
2188
2189         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2190 }
2191
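     /* try to spend the whole rx buffer on per-TC private buffers, split
      * evenly across the enabled TCs, leaving no shared buffer
      */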
2192 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2193                                        struct hclge_pkt_buf_alloc *buf_alloc)
2194 {
2195 #define COMPENSATE_BUFFER       0x3C00
2196 #define COMPENSATE_HALF_MPS_NUM 5
2197 #define PRIV_WL_GAP             0x1800
2198
2199         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2200         u32 tc_num = hclge_get_tc_num(hdev);
2201         u32 half_mps = hdev->mps >> 1;
2202         u32 min_rx_priv;
2203         unsigned int i;
2204
2205         if (tc_num)
2206                 rx_priv = rx_priv / tc_num;
2207
2208         if (tc_num <= NEED_RESERVE_TC_NUM)
2209                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2210
2211         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2212                         COMPENSATE_HALF_MPS_NUM * half_mps;
2213         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2214         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2215         if (rx_priv < min_rx_priv)
2216                 return false;
2217
2218         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2219                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2220
2221                 priv->enable = 0;
2222                 priv->wl.low = 0;
2223                 priv->wl.high = 0;
2224                 priv->buf_size = 0;
2225
2226                 if (!(hdev->hw_tc_map & BIT(i)))
2227                         continue;
2228
2229                 priv->enable = 1;
2230                 priv->buf_size = rx_priv;
2231                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2232                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2233         }
2234
2235         buf_alloc->s_buf.buf_size = 0;
2236
2237         return true;
2238 }
2239
2240 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2241  * @hdev: pointer to struct hclge_dev
2242  * @buf_alloc: pointer to buffer calculation data
2243  * @return: 0 on success, negative on failure
2244  */
2245 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2246                                 struct hclge_pkt_buf_alloc *buf_alloc)
2247 {
2248         /* When DCB is not supported, rx private buffer is not allocated. */
2249         if (!hnae3_dev_dcb_supported(hdev)) {
2250                 u32 rx_all = hdev->pkt_buf_size;
2251
2252                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2253                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2254                         return -ENOMEM;
2255
2256                 return 0;
2257         }
2258
2259         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2260                 return 0;
2261
2262         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2263                 return 0;
2264
2265         /* try to decrease the buffer size */
2266         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2267                 return 0;
2268
2269         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2270                 return 0;
2271
2272         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2273                 return 0;
2274
2275         return -ENOMEM;
2276 }
2277
2278 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2279                                    struct hclge_pkt_buf_alloc *buf_alloc)
2280 {
2281         struct hclge_rx_priv_buff_cmd *req;
2282         struct hclge_desc desc;
2283         int ret;
2284         int i;
2285
2286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2287         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2288
2289         /* Alloc private buffers for TCs */
2290         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2291                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2292
2293                 req->buf_num[i] =
2294                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2295                 req->buf_num[i] |=
2296                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2297         }
2298
2299         req->shared_buf =
2300                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2301                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2302
2303         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2304         if (ret)
2305                 dev_err(&hdev->pdev->dev,
2306                         "rx private buffer alloc cmd failed %d\n", ret);
2307
2308         return ret;
2309 }
2310
2311 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2312                                    struct hclge_pkt_buf_alloc *buf_alloc)
2313 {
2314         struct hclge_rx_priv_wl_buf *req;
2315         struct hclge_priv_buf *priv;
2316         struct hclge_desc desc[2];
2317         int i, j;
2318         int ret;
2319
2320         for (i = 0; i < 2; i++) {
2321                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2322                                            false);
2323                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2324
2325                 /* The first descriptor sets the NEXT bit to 1 */
2326                 if (i == 0)
2327                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2328                 else
2329                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330
2331                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2332                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2333
2334                         priv = &buf_alloc->priv_buf[idx];
2335                         req->tc_wl[j].high =
2336                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2337                         req->tc_wl[j].high |=
2338                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2339                         req->tc_wl[j].low =
2340                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2341                         req->tc_wl[j].low |=
2342                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2343                 }
2344         }
2345
2346         /* Send 2 descriptors at one time */
2347         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2348         if (ret)
2349                 dev_err(&hdev->pdev->dev,
2350                         "rx private waterline config cmd failed %d\n",
2351                         ret);
2352         return ret;
2353 }
2354
2355 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2356                                     struct hclge_pkt_buf_alloc *buf_alloc)
2357 {
2358         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2359         struct hclge_rx_com_thrd *req;
2360         struct hclge_desc desc[2];
2361         struct hclge_tc_thrd *tc;
2362         int i, j;
2363         int ret;
2364
2365         for (i = 0; i < 2; i++) {
2366                 hclge_cmd_setup_basic_desc(&desc[i],
2367                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2368                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2369
2370                 /* The first descriptor sets the NEXT bit to 1 */
2371                 if (i == 0)
2372                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2373                 else
2374                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2375
2376                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2377                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2378
2379                         req->com_thrd[j].high =
2380                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2381                         req->com_thrd[j].high |=
2382                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2383                         req->com_thrd[j].low =
2384                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2385                         req->com_thrd[j].low |=
2386                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2387                 }
2388         }
2389
2390         /* Send 2 descriptors at one time */
2391         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2392         if (ret)
2393                 dev_err(&hdev->pdev->dev,
2394                         "common threshold config cmd failed %d\n", ret);
2395         return ret;
2396 }
2397
2398 static int hclge_common_wl_config(struct hclge_dev *hdev,
2399                                   struct hclge_pkt_buf_alloc *buf_alloc)
2400 {
2401         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2402         struct hclge_rx_com_wl *req;
2403         struct hclge_desc desc;
2404         int ret;
2405
2406         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2407
2408         req = (struct hclge_rx_com_wl *)desc.data;
2409         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2410         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2411
2412         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2413         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2414
2415         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2416         if (ret)
2417                 dev_err(&hdev->pdev->dev,
2418                         "common waterline config cmd failed %d\n", ret);
2419
2420         return ret;
2421 }
2422
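     /* calculate and program the packet buffer layout: per-TC tx buffers,
      * per-TC rx private buffers, and the shared rx buffer watermarks and
      * thresholds
      */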
2423 int hclge_buffer_alloc(struct hclge_dev *hdev)
2424 {
2425         struct hclge_pkt_buf_alloc *pkt_buf;
2426         int ret;
2427
2428         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2429         if (!pkt_buf)
2430                 return -ENOMEM;
2431
2432         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2433         if (ret) {
2434                 dev_err(&hdev->pdev->dev,
2435                         "could not calc tx buffer size for all TCs %d\n", ret);
2436                 goto out;
2437         }
2438
2439         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2440         if (ret) {
2441                 dev_err(&hdev->pdev->dev,
2442                         "could not alloc tx buffers %d\n", ret);
2443                 goto out;
2444         }
2445
2446         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2447         if (ret) {
2448                 dev_err(&hdev->pdev->dev,
2449                         "could not calc rx priv buffer size for all TCs %d\n",
2450                         ret);
2451                 goto out;
2452         }
2453
2454         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2455         if (ret) {
2456                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2457                         ret);
2458                 goto out;
2459         }
2460
2461         if (hnae3_dev_dcb_supported(hdev)) {
2462                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2463                 if (ret) {
2464                         dev_err(&hdev->pdev->dev,
2465                                 "could not configure rx private waterline %d\n",
2466                                 ret);
2467                         goto out;
2468                 }
2469
2470                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2471                 if (ret) {
2472                         dev_err(&hdev->pdev->dev,
2473                                 "could not configure common threshold %d\n",
2474                                 ret);
2475                         goto out;
2476                 }
2477         }
2478
2479         ret = hclge_common_wl_config(hdev, pkt_buf);
2480         if (ret)
2481                 dev_err(&hdev->pdev->dev,
2482                         "could not configure common waterline %d\n", ret);
2483
2484 out:
2485         kfree(pkt_buf);
2486         return ret;
2487 }
2488
2489 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2490 {
2491         struct hnae3_handle *roce = &vport->roce;
2492         struct hnae3_handle *nic = &vport->nic;
2493         struct hclge_dev *hdev = vport->back;
2494
2495         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2496
2497         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2498                 return -EINVAL;
2499
2500         roce->rinfo.base_vector = hdev->roce_base_vector;
2501
2502         roce->rinfo.netdev = nic->kinfo.netdev;
2503         roce->rinfo.roce_io_base = hdev->hw.io_base;
2504         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2505
2506         roce->pdev = nic->pdev;
2507         roce->ae_algo = nic->ae_algo;
2508         roce->numa_node_mask = nic->numa_node_mask;
2509
2510         return 0;
2511 }
2512
2513 static int hclge_init_msi(struct hclge_dev *hdev)
2514 {
2515         struct pci_dev *pdev = hdev->pdev;
2516         int vectors;
2517         int i;
2518
2519         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2520                                         hdev->num_msi,
2521                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2522         if (vectors < 0) {
2523                 dev_err(&pdev->dev,
2524                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2525                         vectors);
2526                 return vectors;
2527         }
2528         if (vectors < hdev->num_msi)
2529                 dev_warn(&hdev->pdev->dev,
2530                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2531                          hdev->num_msi, vectors);
2532
2533         hdev->num_msi = vectors;
2534         hdev->num_msi_left = vectors;
2535
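             /* RoCE vectors are laid out right after the NIC vectors */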
2536         hdev->base_msi_vector = pdev->irq;
2537         hdev->roce_base_vector = hdev->base_msi_vector +
2538                                 hdev->num_nic_msi;
2539
2540         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2541                                            sizeof(u16), GFP_KERNEL);
2542         if (!hdev->vector_status) {
2543                 pci_free_irq_vectors(pdev);
2544                 return -ENOMEM;
2545         }
2546
2547         for (i = 0; i < hdev->num_msi; i++)
2548                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2549
2550         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2551                                         sizeof(int), GFP_KERNEL);
2552         if (!hdev->vector_irq) {
2553                 pci_free_irq_vectors(pdev);
2554                 return -ENOMEM;
2555         }
2556
2557         return 0;
2558 }
2559
2560 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2561 {
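             /* only 10M and 100M links can run half duplex; force full duplex otherwise */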
2562         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2563                 duplex = HCLGE_MAC_FULL;
2564
2565         return duplex;
2566 }
2567
2568 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2569                                       u8 duplex)
2570 {
2571         struct hclge_config_mac_speed_dup_cmd *req;
2572         struct hclge_desc desc;
2573         int ret;
2574
2575         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2576
2577         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2578
2579         if (duplex)
2580                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2581
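             /* translate the MAC speed into the HCLGE_CFG_SPEED_M/S field
              * encoding expected by the firmware
              */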
2582         switch (speed) {
2583         case HCLGE_MAC_SPEED_10M:
2584                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2585                                 HCLGE_CFG_SPEED_S, 6);
2586                 break;
2587         case HCLGE_MAC_SPEED_100M:
2588                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2589                                 HCLGE_CFG_SPEED_S, 7);
2590                 break;
2591         case HCLGE_MAC_SPEED_1G:
2592                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2593                                 HCLGE_CFG_SPEED_S, 0);
2594                 break;
2595         case HCLGE_MAC_SPEED_10G:
2596                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2597                                 HCLGE_CFG_SPEED_S, 1);
2598                 break;
2599         case HCLGE_MAC_SPEED_25G:
2600                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2601                                 HCLGE_CFG_SPEED_S, 2);
2602                 break;
2603         case HCLGE_MAC_SPEED_40G:
2604                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2605                                 HCLGE_CFG_SPEED_S, 3);
2606                 break;
2607         case HCLGE_MAC_SPEED_50G:
2608                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2609                                 HCLGE_CFG_SPEED_S, 4);
2610                 break;
2611         case HCLGE_MAC_SPEED_100G:
2612                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2613                                 HCLGE_CFG_SPEED_S, 5);
2614                 break;
2615         case HCLGE_MAC_SPEED_200G:
2616                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2617                                 HCLGE_CFG_SPEED_S, 8);
2618                 break;
2619         default:
2620                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2621                 return -EINVAL;
2622         }
2623
2624         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2625                       1);
2626
2627         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2628         if (ret) {
2629                 dev_err(&hdev->pdev->dev,
2630                         "mac speed/duplex config cmd failed %d.\n", ret);
2631                 return ret;
2632         }
2633
2634         return 0;
2635 }
2636
2637 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2638 {
2639         struct hclge_mac *mac = &hdev->hw.mac;
2640         int ret;
2641
2642         duplex = hclge_check_speed_dup(duplex, speed);
2643         if (!mac->support_autoneg && mac->speed == speed &&
2644             mac->duplex == duplex)
2645                 return 0;
2646
2647         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2648         if (ret)
2649                 return ret;
2650
2651         hdev->hw.mac.speed = speed;
2652         hdev->hw.mac.duplex = duplex;
2653
2654         return 0;
2655 }
2656
2657 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2658                                      u8 duplex)
2659 {
2660         struct hclge_vport *vport = hclge_get_vport(handle);
2661         struct hclge_dev *hdev = vport->back;
2662
2663         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2664 }
2665
2666 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2667 {
2668         struct hclge_config_auto_neg_cmd *req;
2669         struct hclge_desc desc;
2670         u32 flag = 0;
2671         int ret;
2672
2673         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2674
2675         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2676         if (enable)
2677                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2678         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2679
2680         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2681         if (ret)
2682                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2683                         ret);
2684
2685         return ret;
2686 }
2687
2688 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2689 {
2690         struct hclge_vport *vport = hclge_get_vport(handle);
2691         struct hclge_dev *hdev = vport->back;
2692
2693         if (!hdev->hw.mac.support_autoneg) {
2694                 if (enable) {
2695                         dev_err(&hdev->pdev->dev,
2696                                 "autoneg is not supported by current port\n");
2697                         return -EOPNOTSUPP;
2698                 } else {
2699                         return 0;
2700                 }
2701         }
2702
2703         return hclge_set_autoneg_en(hdev, enable);
2704 }
2705
2706 static int hclge_get_autoneg(struct hnae3_handle *handle)
2707 {
2708         struct hclge_vport *vport = hclge_get_vport(handle);
2709         struct hclge_dev *hdev = vport->back;
2710         struct phy_device *phydev = hdev->hw.mac.phydev;
2711
2712         if (phydev)
2713                 return phydev->autoneg;
2714
2715         return hdev->hw.mac.autoneg;
2716 }
2717
2718 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2719 {
2720         struct hclge_vport *vport = hclge_get_vport(handle);
2721         struct hclge_dev *hdev = vport->back;
2722         int ret;
2723
2724         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2725
2726         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2727         if (ret)
2728                 return ret;
2729         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2730 }
2731
2732 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2733 {
2734         struct hclge_vport *vport = hclge_get_vport(handle);
2735         struct hclge_dev *hdev = vport->back;
2736
2737         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2738                 return hclge_set_autoneg_en(hdev, !halt);
2739
2740         return 0;
2741 }
2742
2743 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2744 {
2745         struct hclge_config_fec_cmd *req;
2746         struct hclge_desc desc;
2747         int ret;
2748
2749         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2750
2751         req = (struct hclge_config_fec_cmd *)desc.data;
2752         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2753                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2754         if (fec_mode & BIT(HNAE3_FEC_RS))
2755                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2756                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2757         if (fec_mode & BIT(HNAE3_FEC_BASER))
2758                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2759                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2760
2761         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2762         if (ret)
2763                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2764
2765         return ret;
2766 }
2767
2768 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2769 {
2770         struct hclge_vport *vport = hclge_get_vport(handle);
2771         struct hclge_dev *hdev = vport->back;
2772         struct hclge_mac *mac = &hdev->hw.mac;
2773         int ret;
2774
2775         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2776                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2777                 return -EINVAL;
2778         }
2779
2780         ret = hclge_set_fec_hw(hdev, fec_mode);
2781         if (ret)
2782                 return ret;
2783
2784         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2785         return 0;
2786 }
2787
2788 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2789                           u8 *fec_mode)
2790 {
2791         struct hclge_vport *vport = hclge_get_vport(handle);
2792         struct hclge_dev *hdev = vport->back;
2793         struct hclge_mac *mac = &hdev->hw.mac;
2794
2795         if (fec_ability)
2796                 *fec_ability = mac->fec_ability;
2797         if (fec_mode)
2798                 *fec_mode = mac->fec_mode;
2799 }
2800
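/* hclge_mac_init - bring the MAC to a known state: configure speed/duplex,
 * restore autoneg and user FEC settings if supported, set the MTU and the
 * default loopback, then allocate packet buffers.
 */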
2801 static int hclge_mac_init(struct hclge_dev *hdev)
2802 {
2803         struct hclge_mac *mac = &hdev->hw.mac;
2804         int ret;
2805
2806         hdev->support_sfp_query = true;
2807         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2808         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2809                                          hdev->hw.mac.duplex);
2810         if (ret)
2811                 return ret;
2812
2813         if (hdev->hw.mac.support_autoneg) {
2814                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2815                 if (ret)
2816                         return ret;
2817         }
2818
2819         mac->link = 0;
2820
2821         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2822                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2823                 if (ret)
2824                         return ret;
2825         }
2826
2827         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2828         if (ret) {
2829                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2830                 return ret;
2831         }
2832
2833         ret = hclge_set_default_loopback(hdev);
2834         if (ret)
2835                 return ret;
2836
2837         ret = hclge_buffer_alloc(hdev);
2838         if (ret)
2839                 dev_err(&hdev->pdev->dev,
2840                         "allocate buffer fail, ret=%d\n", ret);
2841
2842         return ret;
2843 }
2844
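/* The *_task_schedule helpers below queue the shared delayed service task on
 * the CPU indicated by hdev->affinity_mask; the per-purpose state bits ensure
 * each source (mailbox, reset, error handling) is scheduled only once.
 */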
2845 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2846 {
2847         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2848             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2849                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2850                                     hclge_wq, &hdev->service_task, 0);
2851 }
2852
2853 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2854 {
2855         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2856             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2857                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2858                                     hclge_wq, &hdev->service_task, 0);
2859 }
2860
2861 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2862 {
2863         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2864             !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2865                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2866                                     hclge_wq, &hdev->service_task, 0);
2867 }
2868
2869 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2870 {
2871         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2872             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2873                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2874                                     hclge_wq, &hdev->service_task,
2875                                     delay_time);
2876 }
2877
2878 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2879 {
2880         struct hclge_link_status_cmd *req;
2881         struct hclge_desc desc;
2882         int ret;
2883
2884         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2886         if (ret) {
2887                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2888                         ret);
2889                 return ret;
2890         }
2891
2892         req = (struct hclge_link_status_cmd *)desc.data;
2893         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2894                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2895
2896         return 0;
2897 }
2898
2899 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2900 {
2901         struct phy_device *phydev = hdev->hw.mac.phydev;
2902
2903         *link_status = HCLGE_LINK_STATUS_DOWN;
2904
2905         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2906                 return 0;
2907
2908         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2909                 return 0;
2910
2911         return hclge_get_mac_link_status(hdev, link_status);
2912 }
2913
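/* hclge_push_link_status - push the current link state to every alive VF
 * whose link state is configured as IFLA_VF_LINK_STATE_AUTO.
 */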
2914 static void hclge_push_link_status(struct hclge_dev *hdev)
2915 {
2916         struct hclge_vport *vport;
2917         int ret;
2918         u16 i;
2919
2920         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2921                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2922
2923                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2924                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2925                         continue;
2926
2927                 ret = hclge_push_vf_link_status(vport);
2928                 if (ret) {
2929                         dev_err(&hdev->pdev->dev,
2930                                 "failed to push link status to vf%u, ret = %d\n",
2931                                 i, ret);
2932                 }
2933         }
2934 }
2935
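/* hclge_update_link_status - query the MAC/PHY link state and, if it has
 * changed, notify the NIC client, the RoCE client and the VFs. The
 * LINK_UPDATING state bit prevents concurrent updates.
 */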
2936 static void hclge_update_link_status(struct hclge_dev *hdev)
2937 {
2938         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2939         struct hnae3_handle *handle = &hdev->vport[0].nic;
2940         struct hnae3_client *rclient = hdev->roce_client;
2941         struct hnae3_client *client = hdev->nic_client;
2942         int state;
2943         int ret;
2944
2945         if (!client)
2946                 return;
2947
2948         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2949                 return;
2950
2951         ret = hclge_get_mac_phy_link(hdev, &state);
2952         if (ret) {
2953                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2954                 return;
2955         }
2956
2957         if (state != hdev->hw.mac.link) {
2958                 hdev->hw.mac.link = state;
2959                 client->ops->link_status_change(handle, state);
2960                 hclge_config_mac_tnl_int(hdev, state);
2961                 if (rclient && rclient->ops->link_status_change)
2962                         rclient->ops->link_status_change(rhandle, state);
2963
2964                 hclge_push_link_status(hdev);
2965         }
2966
2967         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2968 }
2969
2970 static void hclge_update_port_capability(struct hclge_dev *hdev,
2971                                          struct hclge_mac *mac)
2972 {
2973         if (hnae3_dev_fec_supported(hdev))
2974                 /* update fec ability by speed */
2975                 hclge_convert_setting_fec(mac);
2976
2977         /* firmware cannot identify the backplane type, the media type
2978          * read from the configuration helps to deal with it
2979          */
2980         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2981             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2982                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2983         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2984                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2985
2986         if (mac->support_autoneg) {
2987                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2988                 linkmode_copy(mac->advertising, mac->supported);
2989         } else {
2990                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2991                                    mac->supported);
2992                 linkmode_zero(mac->advertising);
2993         }
2994 }
2995
2996 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2997 {
2998         struct hclge_sfp_info_cmd *resp;
2999         struct hclge_desc desc;
3000         int ret;
3001
3002         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3003         resp = (struct hclge_sfp_info_cmd *)desc.data;
3004         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3005         if (ret == -EOPNOTSUPP) {
3006                 dev_warn(&hdev->pdev->dev,
3007                          "IMP does not support getting SFP speed %d\n", ret);
3008                 return ret;
3009         } else if (ret) {
3010                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3011                 return ret;
3012         }
3013
3014         *speed = le32_to_cpu(resp->speed);
3015
3016         return 0;
3017 }
3018
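/* hclge_get_sfp_info - query the active port information from the firmware:
 * the speed, and on newer firmware also the module type, speed ability,
 * autoneg and FEC state. A reported speed of 0 is ignored; old firmware that
 * does not fill speed_ability only provides the SFP speed.
 */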
3019 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3020 {
3021         struct hclge_sfp_info_cmd *resp;
3022         struct hclge_desc desc;
3023         int ret;
3024
3025         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3026         resp = (struct hclge_sfp_info_cmd *)desc.data;
3027
3028         resp->query_type = QUERY_ACTIVE_SPEED;
3029
3030         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3031         if (ret == -EOPNOTSUPP) {
3032                 dev_warn(&hdev->pdev->dev,
3033                          "IMP does not support getting SFP info %d\n", ret);
3034                 return ret;
3035         } else if (ret) {
3036                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3037                 return ret;
3038         }
3039
3040         /* In some cases, the MAC speed read from the IMP may be 0; it
3041          * shouldn't be assigned to mac->speed.
3042          */
3043         if (!le32_to_cpu(resp->speed))
3044                 return 0;
3045
3046         mac->speed = le32_to_cpu(resp->speed);
3047         /* if resp->speed_ability is 0, the firmware is an old version
3048          * that does not report these params, so do not update them
3049          */
3050         if (resp->speed_ability) {
3051                 mac->module_type = le32_to_cpu(resp->module_type);
3052                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3053                 mac->autoneg = resp->autoneg;
3054                 mac->support_autoneg = resp->autoneg_ability;
3055                 mac->speed_type = QUERY_ACTIVE_SPEED;
3056                 if (!resp->active_fec)
3057                         mac->fec_mode = 0;
3058                 else
3059                         mac->fec_mode = BIT(resp->active_fec);
3060         } else {
3061                 mac->speed_type = QUERY_SFP_SPEED;
3062         }
3063
3064         return 0;
3065 }
3066
3067 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3068                                         struct ethtool_link_ksettings *cmd)
3069 {
3070         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3071         struct hclge_vport *vport = hclge_get_vport(handle);
3072         struct hclge_phy_link_ksetting_0_cmd *req0;
3073         struct hclge_phy_link_ksetting_1_cmd *req1;
3074         u32 supported, advertising, lp_advertising;
3075         struct hclge_dev *hdev = vport->back;
3076         int ret;
3077
3078         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3079                                    true);
3080         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3081         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3082                                    true);
3083
3084         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3085         if (ret) {
3086                 dev_err(&hdev->pdev->dev,
3087                         "failed to get phy link ksetting, ret = %d.\n", ret);
3088                 return ret;
3089         }
3090
3091         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3092         cmd->base.autoneg = req0->autoneg;
3093         cmd->base.speed = le32_to_cpu(req0->speed);
3094         cmd->base.duplex = req0->duplex;
3095         cmd->base.port = req0->port;
3096         cmd->base.transceiver = req0->transceiver;
3097         cmd->base.phy_address = req0->phy_address;
3098         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3099         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3100         supported = le32_to_cpu(req0->supported);
3101         advertising = le32_to_cpu(req0->advertising);
3102         lp_advertising = le32_to_cpu(req0->lp_advertising);
3103         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3104                                                 supported);
3105         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3106                                                 advertising);
3107         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3108                                                 lp_advertising);
3109
3110         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3111         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3112         cmd->base.master_slave_state = req1->master_slave_state;
3113
3114         return 0;
3115 }
3116
3117 static int
3118 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3119                              const struct ethtool_link_ksettings *cmd)
3120 {
3121         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3122         struct hclge_vport *vport = hclge_get_vport(handle);
3123         struct hclge_phy_link_ksetting_0_cmd *req0;
3124         struct hclge_phy_link_ksetting_1_cmd *req1;
3125         struct hclge_dev *hdev = vport->back;
3126         u32 advertising;
3127         int ret;
3128
3129         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3130             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3131              (cmd->base.duplex != DUPLEX_HALF &&
3132               cmd->base.duplex != DUPLEX_FULL)))
3133                 return -EINVAL;
3134
3135         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3136                                    false);
3137         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3138         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3139                                    false);
3140
3141         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3142         req0->autoneg = cmd->base.autoneg;
3143         req0->speed = cpu_to_le32(cmd->base.speed);
3144         req0->duplex = cmd->base.duplex;
3145         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3146                                                 cmd->link_modes.advertising);
3147         req0->advertising = cpu_to_le32(advertising);
3148         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3149
3150         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3151         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3152
3153         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3154         if (ret) {
3155                 dev_err(&hdev->pdev->dev,
3156                         "failed to set phy link ksettings, ret = %d.\n", ret);
3157                 return ret;
3158         }
3159
3160         hdev->hw.mac.autoneg = cmd->base.autoneg;
3161         hdev->hw.mac.speed = cmd->base.speed;
3162         hdev->hw.mac.duplex = cmd->base.duplex;
3163         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3164
3165         return 0;
3166 }
3167
3168 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3169 {
3170         struct ethtool_link_ksettings cmd;
3171         int ret;
3172
3173         if (!hnae3_dev_phy_imp_supported(hdev))
3174                 return 0;
3175
3176         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3177         if (ret)
3178                 return ret;
3179
3180         hdev->hw.mac.autoneg = cmd.base.autoneg;
3181         hdev->hw.mac.speed = cmd.base.speed;
3182         hdev->hw.mac.duplex = cmd.base.duplex;
3183
3184         return 0;
3185 }
3186
3187 static int hclge_tp_port_init(struct hclge_dev *hdev)
3188 {
3189         struct ethtool_link_ksettings cmd;
3190
3191         if (!hnae3_dev_phy_imp_supported(hdev))
3192                 return 0;
3193
3194         cmd.base.autoneg = hdev->hw.mac.autoneg;
3195         cmd.base.speed = hdev->hw.mac.speed;
3196         cmd.base.duplex = hdev->hw.mac.duplex;
3197         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3198
3199         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3200 }
3201
3202 static int hclge_update_port_info(struct hclge_dev *hdev)
3203 {
3204         struct hclge_mac *mac = &hdev->hw.mac;
3205         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3206         int ret;
3207
3208         /* get the port info from SFP cmd if not copper port */
3209         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3210                 return hclge_update_tp_port_info(hdev);
3211
3212         /* if IMP does not support getting SFP/qSFP info, return directly */
3213         if (!hdev->support_sfp_query)
3214                 return 0;
3215
3216         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3217                 ret = hclge_get_sfp_info(hdev, mac);
3218         else
3219                 ret = hclge_get_sfp_speed(hdev, &speed);
3220
3221         if (ret == -EOPNOTSUPP) {
3222                 hdev->support_sfp_query = false;
3223                 return ret;
3224         } else if (ret) {
3225                 return ret;
3226         }
3227
3228         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3229                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3230                         hclge_update_port_capability(hdev, mac);
3231                         return 0;
3232                 }
3233                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3234                                                HCLGE_MAC_FULL);
3235         } else {
3236                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3237                         return 0; /* do nothing if no SFP */
3238
3239                 /* must configure full duplex for SFP */
3240                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3241         }
3242 }
3243
3244 static int hclge_get_status(struct hnae3_handle *handle)
3245 {
3246         struct hclge_vport *vport = hclge_get_vport(handle);
3247         struct hclge_dev *hdev = vport->back;
3248
3249         hclge_update_link_status(hdev);
3250
3251         return hdev->hw.mac.link;
3252 }
3253
3254 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3255 {
3256         if (!pci_num_vf(hdev->pdev)) {
3257                 dev_err(&hdev->pdev->dev,
3258                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3259                 return NULL;
3260         }
3261
3262         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3263                 dev_err(&hdev->pdev->dev,
3264                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3265                         vf, pci_num_vf(hdev->pdev));
3266                 return NULL;
3267         }
3268
3269         /* VFs start from 1 in vport */
3270         vf += HCLGE_VF_VPORT_START_NUM;
3271         return &hdev->vport[vf];
3272 }
3273
3274 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3275                                struct ifla_vf_info *ivf)
3276 {
3277         struct hclge_vport *vport = hclge_get_vport(handle);
3278         struct hclge_dev *hdev = vport->back;
3279
3280         vport = hclge_get_vf_vport(hdev, vf);
3281         if (!vport)
3282                 return -EINVAL;
3283
3284         ivf->vf = vf;
3285         ivf->linkstate = vport->vf_info.link_state;
3286         ivf->spoofchk = vport->vf_info.spoofchk;
3287         ivf->trusted = vport->vf_info.trusted;
3288         ivf->min_tx_rate = 0;
3289         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3290         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3291         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3292         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3293         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3294
3295         return 0;
3296 }
3297
3298 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3299                                    int link_state)
3300 {
3301         struct hclge_vport *vport = hclge_get_vport(handle);
3302         struct hclge_dev *hdev = vport->back;
3303         int link_state_old;
3304         int ret;
3305
3306         vport = hclge_get_vf_vport(hdev, vf);
3307         if (!vport)
3308                 return -EINVAL;
3309
3310         link_state_old = vport->vf_info.link_state;
3311         vport->vf_info.link_state = link_state;
3312
3313         ret = hclge_push_vf_link_status(vport);
3314         if (ret) {
3315                 vport->vf_info.link_state = link_state_old;
3316                 dev_err(&hdev->pdev->dev,
3317                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3318         }
3319
3320         return ret;
3321 }
3322
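/* hclge_check_event_cause - decode the vector0 interrupt source registers and
 * return which event (reset, error, PTP, mailbox or other) should be handled;
 * for events that need it, *clearval returns the bits to clear afterwards.
 */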
3323 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3324 {
3325         u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3326
3327         /* fetch the events from their corresponding regs */
3328         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3329         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3330         hw_err_src_reg = hclge_read_dev(&hdev->hw,
3331                                         HCLGE_RAS_PF_OTHER_INT_STS_REG);
3332
3333         /* Assumption: if reset and mailbox events are reported together,
3334          * only the reset event is processed in this pass and the mailbox
3335          * events are deferred. Since the RX CMDQ event has not been
3336          * cleared this time, H/W will raise another interrupt just for
3337          * the mailbox.
3338          *
3339          * check for vector0 reset event sources
3340          */
3341         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3342                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3343                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3344                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3345                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3346                 hdev->rst_stats.imp_rst_cnt++;
3347                 return HCLGE_VECTOR0_EVENT_RST;
3348         }
3349
3350         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3351                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3352                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3353                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3354                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3355                 hdev->rst_stats.global_rst_cnt++;
3356                 return HCLGE_VECTOR0_EVENT_RST;
3357         }
3358
3359         /* check for vector0 msix event and hardware error event source */
3360         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3361             hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3362                 return HCLGE_VECTOR0_EVENT_ERR;
3363
3364         /* check for vector0 ptp event source */
3365         if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3366                 *clearval = msix_src_reg;
3367                 return HCLGE_VECTOR0_EVENT_PTP;
3368         }
3369
3370         /* check for vector0 mailbox(=CMDQ RX) event source */
3371         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3372                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3373                 *clearval = cmdq_src_reg;
3374                 return HCLGE_VECTOR0_EVENT_MBX;
3375         }
3376
3377         /* print other vector0 event source */
3378         dev_info(&hdev->pdev->dev,
3379                  "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3380                  cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3381
3382         return HCLGE_VECTOR0_EVENT_OTHER;
3383 }
3384
3385 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3386                                     u32 regclr)
3387 {
3388         switch (event_type) {
3389         case HCLGE_VECTOR0_EVENT_PTP:
3390         case HCLGE_VECTOR0_EVENT_RST:
3391                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3392                 break;
3393         case HCLGE_VECTOR0_EVENT_MBX:
3394                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3395                 break;
3396         default:
3397                 break;
3398         }
3399 }
3400
3401 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3402 {
3403         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3404                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3405                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3406                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3407         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3408 }
3409
3410 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3411 {
3412         writel(enable ? 1 : 0, vector->addr);
3413 }
3414
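/* hclge_misc_irq_handle - vector0 (misc) interrupt handler: disables the
 * vector, decodes the event cause, schedules the matching service task (or
 * handles PTP Tx timestamps inline), clears the cause, and re-enables the
 * vector unless a reset or error event is being handled.
 */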
3415 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3416 {
3417         struct hclge_dev *hdev = data;
3418         unsigned long flags;
3419         u32 clearval = 0;
3420         u32 event_cause;
3421
3422         hclge_enable_vector(&hdev->misc_vector, false);
3423         event_cause = hclge_check_event_cause(hdev, &clearval);
3424
3425         /* vector 0 interrupt is shared with reset and mailbox source events. */
3426         switch (event_cause) {
3427         case HCLGE_VECTOR0_EVENT_ERR:
3428                 hclge_errhand_task_schedule(hdev);
3429                 break;
3430         case HCLGE_VECTOR0_EVENT_RST:
3431                 hclge_reset_task_schedule(hdev);
3432                 break;
3433         case HCLGE_VECTOR0_EVENT_PTP:
3434                 spin_lock_irqsave(&hdev->ptp->lock, flags);
3435                 hclge_ptp_clean_tx_hwts(hdev);
3436                 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3437                 break;
3438         case HCLGE_VECTOR0_EVENT_MBX:
3439                 /* If we are here then either:
3440                  * 1. we are not handling any mbx task and are not scheduled
3441                  *    as well,
3442                  *                        OR
3443                  * 2. we could be handling an mbx task but nothing more is
3444                  *    scheduled.
3445                  * In both cases, we should schedule the mbx task, as there
3446                  * are more mbx messages reported by this interrupt.
3447                  */
3448                 hclge_mbx_task_schedule(hdev);
3449                 break;
3450         default:
3451                 dev_warn(&hdev->pdev->dev,
3452                          "received unknown or unhandled event of vector0\n");
3453                 break;
3454         }
3455
3456         hclge_clear_event_cause(hdev, event_cause, clearval);
3457
3458         /* Enable interrupt if it is not caused by reset event or error event */
3459         if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3460             event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3461             event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3462                 hclge_enable_vector(&hdev->misc_vector, true);
3463
3464         return IRQ_HANDLED;
3465 }
3466
3467 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3468 {
3469         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3470                 dev_warn(&hdev->pdev->dev,
3471                          "vector(vector_id %d) has been freed.\n", vector_id);
3472                 return;
3473         }
3474
3475         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3476         hdev->num_msi_left += 1;
3477         hdev->num_msi_used -= 1;
3478 }
3479
3480 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3481 {
3482         struct hclge_misc_vector *vector = &hdev->misc_vector;
3483
3484         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3485
3486         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3487         hdev->vector_status[0] = 0;
3488
3489         hdev->num_msi_left -= 1;
3490         hdev->num_msi_used += 1;
3491 }
3492
3493 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3494                                       const cpumask_t *mask)
3495 {
3496         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3497                                               affinity_notify);
3498
3499         cpumask_copy(&hdev->affinity_mask, mask);
3500 }
3501
3502 static void hclge_irq_affinity_release(struct kref *ref)
3503 {
3504 }
3505
3506 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3507 {
3508         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3509                               &hdev->affinity_mask);
3510
3511         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3512         hdev->affinity_notify.release = hclge_irq_affinity_release;
3513         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3514                                   &hdev->affinity_notify);
3515 }
3516
3517 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3518 {
3519         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3520         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3521 }
3522
3523 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3524 {
3525         int ret;
3526
3527         hclge_get_misc_vector(hdev);
3528
3529         /* this would be explicitly freed in the end */
3530         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3531                  HCLGE_NAME, pci_name(hdev->pdev));
3532         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3533                           0, hdev->misc_vector.name, hdev);
3534         if (ret) {
3535                 hclge_free_vector(hdev, 0);
3536                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3537                         hdev->misc_vector.vector_irq);
3538         }
3539
3540         return ret;
3541 }
3542
3543 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3544 {
3545         free_irq(hdev->misc_vector.vector_irq, hdev);
3546         hclge_free_vector(hdev, 0);
3547 }
3548
3549 int hclge_notify_client(struct hclge_dev *hdev,
3550                         enum hnae3_reset_notify_type type)
3551 {
3552         struct hnae3_handle *handle = &hdev->vport[0].nic;
3553         struct hnae3_client *client = hdev->nic_client;
3554         int ret;
3555
3556         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3557                 return 0;
3558
3559         if (!client->ops->reset_notify)
3560                 return -EOPNOTSUPP;
3561
3562         ret = client->ops->reset_notify(handle, type);
3563         if (ret)
3564                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3565                         type, ret);
3566
3567         return ret;
3568 }
3569
3570 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3571                                     enum hnae3_reset_notify_type type)
3572 {
3573         struct hnae3_handle *handle = &hdev->vport[0].roce;
3574         struct hnae3_client *client = hdev->roce_client;
3575         int ret;
3576
3577         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3578                 return 0;
3579
3580         if (!client->ops->reset_notify)
3581                 return -EOPNOTSUPP;
3582
3583         ret = client->ops->reset_notify(handle, type);
3584         if (ret)
3585                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
3586                         type, ret);
3587
3588         return ret;
3589 }
3590
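/* hclge_reset_wait - poll the reset status register matching hdev->reset_type
 * until the reset bit clears, sleeping 100 ms between polls and giving up
 * after HCLGE_RESET_WAIT_CNT attempts.
 */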
3591 static int hclge_reset_wait(struct hclge_dev *hdev)
3592 {
3593 #define HCLGE_RESET_WAIT_MS     100
3594 #define HCLGE_RESET_WAIT_CNT    350
3595
3596         u32 val, reg, reg_bit;
3597         u32 cnt = 0;
3598
3599         switch (hdev->reset_type) {
3600         case HNAE3_IMP_RESET:
3601                 reg = HCLGE_GLOBAL_RESET_REG;
3602                 reg_bit = HCLGE_IMP_RESET_BIT;
3603                 break;
3604         case HNAE3_GLOBAL_RESET:
3605                 reg = HCLGE_GLOBAL_RESET_REG;
3606                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3607                 break;
3608         case HNAE3_FUNC_RESET:
3609                 reg = HCLGE_FUN_RST_ING;
3610                 reg_bit = HCLGE_FUN_RST_ING_B;
3611                 break;
3612         default:
3613                 dev_err(&hdev->pdev->dev,
3614                         "Wait for unsupported reset type: %d\n",
3615                         hdev->reset_type);
3616                 return -EINVAL;
3617         }
3618
3619         val = hclge_read_dev(&hdev->hw, reg);
3620         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3621                 msleep(HCLGE_RESET_WAIT_MS);
3622                 val = hclge_read_dev(&hdev->hw, reg);
3623                 cnt++;
3624         }
3625
3626         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3627                 dev_warn(&hdev->pdev->dev,
3628                          "Wait for reset timeout: %d\n", hdev->reset_type);
3629                 return -EBUSY;
3630         }
3631
3632         return 0;
3633 }
3634
3635 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3636 {
3637         struct hclge_vf_rst_cmd *req;
3638         struct hclge_desc desc;
3639
3640         req = (struct hclge_vf_rst_cmd *)desc.data;
3641         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3642         req->dest_vfid = func_id;
3643
3644         if (reset)
3645                 req->vf_rst = 0x1;
3646
3647         return hclge_cmd_send(&hdev->hw, &desc, 1);
3648 }
3649
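/* hclge_set_all_vf_rst - set or clear FUNC_RST_ING for every VF vport and,
 * when asserting a reset, inform each alive VF so its driver can prepare
 * (this may fail if the VF driver is not loaded).
 */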
3650 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3651 {
3652         int i;
3653
3654         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3655                 struct hclge_vport *vport = &hdev->vport[i];
3656                 int ret;
3657
3658                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3659                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3660                 if (ret) {
3661                         dev_err(&hdev->pdev->dev,
3662                                 "set vf(%u) rst failed %d!\n",
3663                                 vport->vport_id, ret);
3664                         return ret;
3665                 }
3666
3667                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3668                         continue;
3669
3670                 /* Inform VF to process the reset.
3671                  * hclge_inform_reset_assert_to_vf may fail if VF
3672                  * driver is not loaded.
3673                  */
3674                 ret = hclge_inform_reset_assert_to_vf(vport);
3675                 if (ret)
3676                         dev_warn(&hdev->pdev->dev,
3677                                  "inform reset to vf(%u) failed %d!\n",
3678                                  vport->vport_id, ret);
3679         }
3680
3681         return 0;
3682 }
3683
3684 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3685 {
3686         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3687             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3688             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3689                 return;
3690
3691         hclge_mbx_handler(hdev);
3692
3693         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3694 }
3695
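/* hclge_func_reset_sync_vf - before a PF or FLR reset, poll the firmware
 * until all VFs report they are ready (have stopped IO), servicing pending
 * mailbox requests in between so VFs can bring their netdevs down.
 */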
3696 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3697 {
3698         struct hclge_pf_rst_sync_cmd *req;
3699         struct hclge_desc desc;
3700         int cnt = 0;
3701         int ret;
3702
3703         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3705
3706         do {
3707                 /* VF needs to bring its netdev down via mbx during PF or FLR reset */
3708                 hclge_mailbox_service_task(hdev);
3709
3710                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3711                 /* for compatibility with old firmware, wait
3712                  * 100 ms for the VF to stop IO
3713                  */
3714                 if (ret == -EOPNOTSUPP) {
3715                         msleep(HCLGE_RESET_SYNC_TIME);
3716                         return;
3717                 } else if (ret) {
3718                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3719                                  ret);
3720                         return;
3721                 } else if (req->all_vf_ready) {
3722                         return;
3723                 }
3724                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3725                 hclge_cmd_reuse_desc(&desc, true);
3726         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3727
3728         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3729 }
3730
3731 void hclge_report_hw_error(struct hclge_dev *hdev,
3732                            enum hnae3_hw_error_type type)
3733 {
3734         struct hnae3_client *client = hdev->nic_client;
3735
3736         if (!client || !client->ops->process_hw_error ||
3737             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3738                 return;
3739
3740         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3741 }
3742
3743 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3744 {
3745         u32 reg_val;
3746
3747         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3748         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3749                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3750                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3751                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3752         }
3753
3754         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3755                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3756                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3757                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3758         }
3759 }
3760
3761 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3762 {
3763         struct hclge_desc desc;
3764         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3765         int ret;
3766
3767         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3768         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3769         req->fun_reset_vfid = func_id;
3770
3771         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3772         if (ret)
3773                 dev_err(&hdev->pdev->dev,
3774                         "send function reset cmd fail, status =%d\n", ret);
3775
3776         return ret;
3777 }
3778
3779 static void hclge_do_reset(struct hclge_dev *hdev)
3780 {
3781         struct hnae3_handle *handle = &hdev->vport[0].nic;
3782         struct pci_dev *pdev = hdev->pdev;
3783         u32 val;
3784
3785         if (hclge_get_hw_reset_stat(handle)) {
3786                 dev_info(&pdev->dev, "hardware reset not finish\n");
3787                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3788                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3789                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3790                 return;
3791         }
3792
3793         switch (hdev->reset_type) {
3794         case HNAE3_IMP_RESET:
3795                 dev_info(&pdev->dev, "IMP reset requested\n");
3796                 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3797                 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3798                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3799                 break;
3800         case HNAE3_GLOBAL_RESET:
3801                 dev_info(&pdev->dev, "global reset requested\n");
3802                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3803                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3804                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3805                 break;
3806         case HNAE3_FUNC_RESET:
3807                 dev_info(&pdev->dev, "PF reset requested\n");
3808                 /* schedule again to check later */
3809                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3810                 hclge_reset_task_schedule(hdev);
3811                 break;
3812         default:
3813                 dev_warn(&pdev->dev,
3814                          "unsupported reset type: %d\n", hdev->reset_type);
3815                 break;
3816         }
3817 }
3818
3819 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3820                                                    unsigned long *addr)
3821 {
3822         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3823         struct hclge_dev *hdev = ae_dev->priv;
3824
3825         /* return the highest priority reset level amongst all */
3826         if (test_bit(HNAE3_IMP_RESET, addr)) {
3827                 rst_level = HNAE3_IMP_RESET;
3828                 clear_bit(HNAE3_IMP_RESET, addr);
3829                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3830                 clear_bit(HNAE3_FUNC_RESET, addr);
3831         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3832                 rst_level = HNAE3_GLOBAL_RESET;
3833                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3834                 clear_bit(HNAE3_FUNC_RESET, addr);
3835         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3836                 rst_level = HNAE3_FUNC_RESET;
3837                 clear_bit(HNAE3_FUNC_RESET, addr);
3838         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3839                 rst_level = HNAE3_FLR_RESET;
3840                 clear_bit(HNAE3_FLR_RESET, addr);
3841         }
3842
3843         if (hdev->reset_type != HNAE3_NONE_RESET &&
3844             rst_level < hdev->reset_type)
3845                 return HNAE3_NONE_RESET;
3846
3847         return rst_level;
3848 }
3849
3850 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3851 {
3852         u32 clearval = 0;
3853
3854         switch (hdev->reset_type) {
3855         case HNAE3_IMP_RESET:
3856                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3857                 break;
3858         case HNAE3_GLOBAL_RESET:
3859                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3860                 break;
3861         default:
3862                 break;
3863         }
3864
3865         if (!clearval)
3866                 return;
3867
3868         /* For revision 0x20, the reset interrupt source
3869          * can only be cleared after the hardware reset is done
3870          */
3871         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3872                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3873                                 clearval);
3874
3875         hclge_enable_vector(&hdev->misc_vector, true);
3876 }
3877
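/* hclge_reset_handshake - set or clear the HCLGE_NIC_SW_RST_RDY bit in the
 * CSQ depth register, which hand-shakes software readiness with the firmware
 * around a reset.
 */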
3878 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3879 {
3880         u32 reg_val;
3881
3882         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3883         if (enable)
3884                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3885         else
3886                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3887
3888         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3889 }
3890
3891 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3892 {
3893         int ret;
3894
3895         ret = hclge_set_all_vf_rst(hdev, true);
3896         if (ret)
3897                 return ret;
3898
3899         hclge_func_reset_sync_vf(hdev);
3900
3901         return 0;
3902 }
3903
3904 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3905 {
3906         u32 reg_val;
3907         int ret = 0;
3908
3909         switch (hdev->reset_type) {
3910         case HNAE3_FUNC_RESET:
3911                 ret = hclge_func_reset_notify_vf(hdev);
3912                 if (ret)
3913                         return ret;
3914
3915                 ret = hclge_func_reset_cmd(hdev, 0);
3916                 if (ret) {
3917                         dev_err(&hdev->pdev->dev,
3918                                 "asserting function reset fail %d!\n", ret);
3919                         return ret;
3920                 }
3921
3922                 /* After performing PF reset, it is not necessary to do the
3923                  * mailbox handling or send any command to firmware, because
3924                  * any mailbox handling or command to firmware is only valid
3925                  * after hclge_cmd_init is called.
3926                  */
3927                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3928                 hdev->rst_stats.pf_rst_cnt++;
3929                 break;
3930         case HNAE3_FLR_RESET:
3931                 ret = hclge_func_reset_notify_vf(hdev);
3932                 if (ret)
3933                         return ret;
3934                 break;
3935         case HNAE3_IMP_RESET:
3936                 hclge_handle_imp_error(hdev);
3937                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3938                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3939                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3940                 break;
3941         default:
3942                 break;
3943         }
3944
3945         /* inform hardware that preparatory work is done */
3946         msleep(HCLGE_RESET_SYNC_TIME);
3947         hclge_reset_handshake(hdev, true);
3948         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3949
3950         return ret;
3951 }
3952
3953 static void hclge_show_rst_info(struct hclge_dev *hdev)
3954 {
3955         char *buf;
3956
3957         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3958         if (!buf)
3959                 return;
3960
3961         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3962
3963         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3964
3965         kfree(buf);
3966 }
3967
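/* hclge_reset_err_handle - decide whether a failed reset should be retried:
 * returns true to re-schedule the reset task (a reset is still pending or the
 * retry budget is not exhausted), false when a new reset interrupt supersedes
 * it or the retry limit has been reached.
 */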
3968 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3969 {
3970 #define MAX_RESET_FAIL_CNT 5
3971
3972         if (hdev->reset_pending) {
3973                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3974                          hdev->reset_pending);
3975                 return true;
3976         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3977                    HCLGE_RESET_INT_M) {
3978                 dev_info(&hdev->pdev->dev,
3979                          "reset failed because new reset interrupt\n");
3980                 hclge_clear_reset_cause(hdev);
3981                 return false;
3982         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3983                 hdev->rst_stats.reset_fail_cnt++;
3984                 set_bit(hdev->reset_type, &hdev->reset_pending);
3985                 dev_info(&hdev->pdev->dev,
3986                          "re-schedule reset task(%u)\n",
3987                          hdev->rst_stats.reset_fail_cnt);
3988                 return true;
3989         }
3990
3991         hclge_clear_reset_cause(hdev);
3992
3993         /* recover the handshake status when reset fails */
3994         hclge_reset_handshake(hdev, true);
3995
3996         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3997
3998         hclge_show_rst_info(hdev);
3999
4000         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4001
4002         return false;
4003 }
4004
4005 static void hclge_update_reset_level(struct hclge_dev *hdev)
4006 {
4007         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4008         enum hnae3_reset_type reset_level;
4009
4010         /* reset requests will not be set during reset, so clear the
4011          * pending reset request to avoid an unnecessary reset
4012          * caused by the same reason.
4013          */
4014         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4015
4016         /* if default_reset_request has a higher level reset request,
4017          * it should be handled as soon as possible, since some errors
4018          * need this kind of reset to be fixed.
4019          */
4020         reset_level = hclge_get_reset_level(ae_dev,
4021                                             &hdev->default_reset_request);
4022         if (reset_level != HNAE3_NONE_RESET)
4023                 set_bit(reset_level, &hdev->reset_request);
4024 }
4025
4026 static int hclge_set_rst_done(struct hclge_dev *hdev)
4027 {
4028         struct hclge_pf_rst_done_cmd *req;
4029         struct hclge_desc desc;
4030         int ret;
4031
4032         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4033         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4034         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4035
4036         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4037         /* To be compatible with the old firmware, which does not support
4038          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4039          * return success
4040          */
4041         if (ret == -EOPNOTSUPP) {
4042                 dev_warn(&hdev->pdev->dev,
4043                          "current firmware does not support command(0x%x)!\n",
4044                          HCLGE_OPC_PF_RST_DONE);
4045                 return 0;
4046         } else if (ret) {
4047                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4048                         ret);
4049         }
4050
4051         return ret;
4052 }
4053
4054 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4055 {
4056         int ret = 0;
4057
4058         switch (hdev->reset_type) {
4059         case HNAE3_FUNC_RESET:
4060         case HNAE3_FLR_RESET:
4061                 ret = hclge_set_all_vf_rst(hdev, false);
4062                 break;
4063         case HNAE3_GLOBAL_RESET:
4064         case HNAE3_IMP_RESET:
4065                 ret = hclge_set_rst_done(hdev);
4066                 break;
4067         default:
4068                 break;
4069         }
4070
4071         /* clear the handshake status after re-initialization is done */
4072         hclge_reset_handshake(hdev, false);
4073
4074         return ret;
4075 }
4076
4077 static int hclge_reset_stack(struct hclge_dev *hdev)
4078 {
4079         int ret;
4080
4081         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4082         if (ret)
4083                 return ret;
4084
4085         ret = hclge_reset_ae_dev(hdev->ae_dev);
4086         if (ret)
4087                 return ret;
4088
4089         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4090 }
4091
4092 static int hclge_reset_prepare(struct hclge_dev *hdev)
4093 {
4094         int ret;
4095
4096         hdev->rst_stats.reset_cnt++;
4097         /* perform reset of the stack & ae device for a client */
4098         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4099         if (ret)
4100                 return ret;
4101
4102         rtnl_lock();
4103         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4104         rtnl_unlock();
4105         if (ret)
4106                 return ret;
4107
4108         return hclge_reset_prepare_wait(hdev);
4109 }
4110
4111 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4112 {
4113         int ret;
4114
4115         hdev->rst_stats.hw_reset_done_cnt++;
4116
4117         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4118         if (ret)
4119                 return ret;
4120
4121         rtnl_lock();
4122         ret = hclge_reset_stack(hdev);
4123         rtnl_unlock();
4124         if (ret)
4125                 return ret;
4126
4127         hclge_clear_reset_cause(hdev);
4128
4129         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4130         /* ignore the RoCE notify error once the reset has already failed
4131          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4132          */
4133         if (ret &&
4134             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4135                 return ret;
4136
4137         ret = hclge_reset_prepare_up(hdev);
4138         if (ret)
4139                 return ret;
4140
4141         rtnl_lock();
4142         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4143         rtnl_unlock();
4144         if (ret)
4145                 return ret;
4146
4147         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4148         if (ret)
4149                 return ret;
4150
4151         hdev->last_reset_time = jiffies;
4152         hdev->rst_stats.reset_fail_cnt = 0;
4153         hdev->rst_stats.reset_done_cnt++;
4154         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4155
4156         hclge_update_reset_level(hdev);
4157
4158         return 0;
4159 }
4160
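     /* A brief note on the flow below: hclge_reset() first prepares (notifies
      * clients down and does the pre-reset handshake), then waits for the
      * hardware to finish the reset, and finally rebuilds (re-initializes the
      * ae dev and clients). If any step fails, the error handler decides
      * whether to reschedule the reset task.
      */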
4161 static void hclge_reset(struct hclge_dev *hdev)
4162 {
4163         if (hclge_reset_prepare(hdev))
4164                 goto err_reset;
4165
4166         if (hclge_reset_wait(hdev))
4167                 goto err_reset;
4168
4169         if (hclge_reset_rebuild(hdev))
4170                 goto err_reset;
4171
4172         return;
4173
4174 err_reset:
4175         if (hclge_reset_err_handle(hdev))
4176                 hclge_reset_task_schedule(hdev);
4177 }
4178
4179 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4180 {
4181         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4182         struct hclge_dev *hdev = ae_dev->priv;
4183
4184         /* We may end up getting called for either of the two cases below:
4185          * 1. A recoverable error was conveyed through APEI and the only way
4186          *    to restore normalcy is to reset.
4187          * 2. A new reset request from the stack due to a timeout.
4188          *
4189          * Check whether this is a new reset request and we are not here just
4190          * because the last reset attempt did not succeed and the watchdog hit
4191          * us again. We can tell if the last reset request did not occur very
4192          * recently (watchdog timer = 5 * HZ, so check after a sufficiently
4193          * large time, say 4 * 5 * HZ). In case of a new request we reset the
4194          * "reset level" to PF reset. If it is a repeat of the most recent
4195          * request we want to throttle it, so we do not allow it again before
4196          * HCLGE_RESET_INTERVAL has elapsed.
4197          */
4198
4199         if (time_before(jiffies, (hdev->last_reset_time +
4200                                   HCLGE_RESET_INTERVAL))) {
4201                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4202                 return;
4203         }
4204
4205         if (hdev->default_reset_request) {
4206                 hdev->reset_level =
4207                         hclge_get_reset_level(ae_dev,
4208                                               &hdev->default_reset_request);
4209         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4210                 hdev->reset_level = HNAE3_FUNC_RESET;
4211         }
4212
4213         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4214                  hdev->reset_level);
4215
4216         /* request reset & schedule reset task */
4217         set_bit(hdev->reset_level, &hdev->reset_request);
4218         hclge_reset_task_schedule(hdev);
4219
4220         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4221                 hdev->reset_level++;
4222 }
4223
4224 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4225                                         enum hnae3_reset_type rst_type)
4226 {
4227         struct hclge_dev *hdev = ae_dev->priv;
4228
4229         set_bit(rst_type, &hdev->default_reset_request);
4230 }
4231
4232 static void hclge_reset_timer(struct timer_list *t)
4233 {
4234         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4235
4236         /* if default_reset_request has no value, it means that this reset
4237          * request has already been handled, so just return here
4238          */
4239         if (!hdev->default_reset_request)
4240                 return;
4241
4242         dev_info(&hdev->pdev->dev,
4243                  "triggering reset in reset timer\n");
4244         hclge_reset_event(hdev->pdev, NULL);
4245 }
4246
4247 static void hclge_reset_subtask(struct hclge_dev *hdev)
4248 {
4249         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4250
4251         /* Check if there is any ongoing reset in the hardware. This status
4252          * can be checked from reset_pending. If there is, we need to wait
4253          * for the hardware to complete the reset:
4254          *    a. If we can determine within a reasonable time that the
4255          *       hardware has fully reset, we can proceed with the driver and
4256          *       client reset.
4257          *    b. Otherwise, we can come back later to check this status, so
4258          *       reschedule now.
4259          */
4260         hdev->last_reset_time = jiffies;
4261         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4262         if (hdev->reset_type != HNAE3_NONE_RESET)
4263                 hclge_reset(hdev);
4264
4265         /* check if we got any *new* reset requests to be honored */
4266         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4267         if (hdev->reset_type != HNAE3_NONE_RESET)
4268                 hclge_do_reset(hdev);
4269
4270         hdev->reset_type = HNAE3_NONE_RESET;
4271 }
4272
4273 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4274 {
4275         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4276         enum hnae3_reset_type reset_type;
4277
4278         if (ae_dev->hw_err_reset_req) {
4279                 reset_type = hclge_get_reset_level(ae_dev,
4280                                                    &ae_dev->hw_err_reset_req);
4281                 hclge_set_def_reset_request(ae_dev, reset_type);
4282         }
4283
4284         if (hdev->default_reset_request && ae_dev->ops->reset_event)
4285                 ae_dev->ops->reset_event(hdev->pdev, NULL);
4286
4287         /* enable interrupt after error handling is complete */
4288         hclge_enable_vector(&hdev->misc_vector, true);
4289 }
4290
4291 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4292 {
4293         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4294
4295         ae_dev->hw_err_reset_req = 0;
4296
4297         if (hclge_find_error_source(hdev)) {
4298                 hclge_handle_error_info_log(ae_dev);
4299                 hclge_handle_mac_tnl(hdev);
4300         }
4301
4302         hclge_handle_err_reset_request(hdev);
4303 }
4304
4305 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4306 {
4307         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4308         struct device *dev = &hdev->pdev->dev;
4309         u32 msix_sts_reg;
4310
4311         msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4312         if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4313                 if (hclge_handle_hw_msix_error
4314                                 (hdev, &hdev->default_reset_request))
4315                         dev_info(dev, "received msix interrupt 0x%x\n",
4316                                  msix_sts_reg);
4317         }
4318
4319         hclge_handle_hw_ras_error(ae_dev);
4320
4321         hclge_handle_err_reset_request(hdev);
4322 }
4323
4324 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4325 {
4326         if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4327                 return;
4328
4329         if (hnae3_dev_ras_imp_supported(hdev))
4330                 hclge_handle_err_recovery(hdev);
4331         else
4332                 hclge_misc_err_recovery(hdev);
4333 }
4334
4335 static void hclge_reset_service_task(struct hclge_dev *hdev)
4336 {
4337         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4338                 return;
4339
4340         down(&hdev->reset_sem);
4341         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4342
4343         hclge_reset_subtask(hdev);
4344
4345         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4346         up(&hdev->reset_sem);
4347 }
4348
4349 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4350 {
4351         int i;
4352
4353         /* start from vport 1, since the PF (vport 0) is always alive */
4354         for (i = 1; i < hdev->num_alloc_vport; i++) {
4355                 struct hclge_vport *vport = &hdev->vport[i];
4356
4357                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4358                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4359
4360                 /* If the VF is not alive, restore the default frame size (mps) */
4361                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4362                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4363         }
4364 }
4365
4366 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4367 {
4368         unsigned long delta = round_jiffies_relative(HZ);
4369
4370         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4371                 return;
4372
4373         /* Always handle link updating to make sure the link state is
4374          * refreshed when the update is triggered by mbx.
4375          */
4376         hclge_update_link_status(hdev);
4377         hclge_sync_mac_table(hdev);
4378         hclge_sync_promisc_mode(hdev);
4379         hclge_sync_fd_table(hdev);
4380
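             /* Throttle: if the last full service round ran less than HZ ago,
              * skip the heavy work below and just reschedule for the remaining
              * time.
              */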
4381         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4382                 delta = jiffies - hdev->last_serv_processed;
4383
4384                 if (delta < round_jiffies_relative(HZ)) {
4385                         delta = round_jiffies_relative(HZ) - delta;
4386                         goto out;
4387                 }
4388         }
4389
4390         hdev->serv_processed_cnt++;
4391         hclge_update_vport_alive(hdev);
4392
4393         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4394                 hdev->last_serv_processed = jiffies;
4395                 goto out;
4396         }
4397
4398         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4399                 hclge_update_stats_for_all(hdev);
4400
4401         hclge_update_port_info(hdev);
4402         hclge_sync_vlan_filter(hdev);
4403
4404         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4405                 hclge_rfs_filter_expire(hdev);
4406
4407         hdev->last_serv_processed = jiffies;
4408
4409 out:
4410         hclge_task_schedule(hdev, delta);
4411 }
4412
4413 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4414 {
4415         unsigned long flags;
4416
4417         if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4418             !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4419             !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4420                 return;
4421
4422         /* prevent racing with the irq handler */
4423         spin_lock_irqsave(&hdev->ptp->lock, flags);
4424
4425         /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4426          * handler may handle it just before spin_lock_irqsave().
4427          */
4428         if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4429                 hclge_ptp_clean_tx_hwts(hdev);
4430
4431         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4432 }
4433
4434 static void hclge_service_task(struct work_struct *work)
4435 {
4436         struct hclge_dev *hdev =
4437                 container_of(work, struct hclge_dev, service_task.work);
4438
4439         hclge_errhand_service_task(hdev);
4440         hclge_reset_service_task(hdev);
4441         hclge_ptp_service_task(hdev);
4442         hclge_mailbox_service_task(hdev);
4443         hclge_periodic_service_task(hdev);
4444
4445         /* Handle error recovery, reset and mbx again in case the periodic
4446          * task delays the handling by calling hclge_task_schedule() in
4447          * hclge_periodic_service_task().
4448          */
4449         hclge_errhand_service_task(hdev);
4450         hclge_reset_service_task(hdev);
4451         hclge_mailbox_service_task(hdev);
4452 }
4453
4454 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4455 {
4456         /* VF handle has no client */
4457         if (!handle->client)
4458                 return container_of(handle, struct hclge_vport, nic);
4459         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4460                 return container_of(handle, struct hclge_vport, roce);
4461         else
4462                 return container_of(handle, struct hclge_vport, nic);
4463 }
4464
4465 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4466                                   struct hnae3_vector_info *vector_info)
4467 {
4468 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4469
4470         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4471
4472         /* an extended offset is needed to configure vectors >= 64 */
4473         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4474                 vector_info->io_addr = hdev->hw.io_base +
4475                                 HCLGE_VECTOR_REG_BASE +
4476                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4477         else
4478                 vector_info->io_addr = hdev->hw.io_base +
4479                                 HCLGE_VECTOR_EXT_REG_BASE +
4480                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4481                                 HCLGE_VECTOR_REG_OFFSET_H +
4482                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4483                                 HCLGE_VECTOR_REG_OFFSET;
4484
4485         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4486         hdev->vector_irq[idx] = vector_info->vector;
4487 }
4488
4489 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4490                             struct hnae3_vector_info *vector_info)
4491 {
4492         struct hclge_vport *vport = hclge_get_vport(handle);
4493         struct hnae3_vector_info *vector = vector_info;
4494         struct hclge_dev *hdev = vport->back;
4495         int alloc = 0;
4496         u16 i = 0;
4497         u16 j;
4498
4499         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4500         vector_num = min(hdev->num_msi_left, vector_num);
4501
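             /* Scan for free vector slots starting from index 1 (index 0 is
              * reserved for the misc vector); only slots still marked
              * HCLGE_INVALID_VPORT are handed out.
              */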
4502         for (j = 0; j < vector_num; j++) {
4503                 while (++i < hdev->num_nic_msi) {
4504                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4505                                 hclge_get_vector_info(hdev, i, vector);
4506                                 vector++;
4507                                 alloc++;
4508
4509                                 break;
4510                         }
4511                 }
4512         }
4513         hdev->num_msi_left -= alloc;
4514         hdev->num_msi_used += alloc;
4515
4516         return alloc;
4517 }
4518
4519 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4520 {
4521         int i;
4522
4523         for (i = 0; i < hdev->num_msi; i++)
4524                 if (vector == hdev->vector_irq[i])
4525                         return i;
4526
4527         return -EINVAL;
4528 }
4529
4530 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4531 {
4532         struct hclge_vport *vport = hclge_get_vport(handle);
4533         struct hclge_dev *hdev = vport->back;
4534         int vector_id;
4535
4536         vector_id = hclge_get_vector_index(hdev, vector);
4537         if (vector_id < 0) {
4538                 dev_err(&hdev->pdev->dev,
4539                         "Get vector index fail. vector = %d\n", vector);
4540                 return vector_id;
4541         }
4542
4543         hclge_free_vector(hdev, vector_id);
4544
4545         return 0;
4546 }
4547
4548 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4549 {
4550         return HCLGE_RSS_KEY_SIZE;
4551 }
4552
4553 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4554                                   const u8 hfunc, const u8 *key)
4555 {
4556         struct hclge_rss_config_cmd *req;
4557         unsigned int key_offset = 0;
4558         struct hclge_desc desc;
4559         int key_counts;
4560         int key_size;
4561         int ret;
4562
4563         key_counts = HCLGE_RSS_KEY_SIZE;
4564         req = (struct hclge_rss_config_cmd *)desc.data;
4565
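             /* The key is longer than one descriptor can carry, so it is
              * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; key_offset
              * tells the firmware which chunk this descriptor holds. For
              * example, assuming a 40-byte key and 16 bytes per descriptor,
              * three commands are sent carrying 16 + 16 + 8 bytes.
              */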
4566         while (key_counts) {
4567                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4568                                            false);
4569
4570                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4571                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4572
4573                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4574                 memcpy(req->hash_key,
4575                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4576
4577                 key_counts -= key_size;
4578                 key_offset++;
4579                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4580                 if (ret) {
4581                         dev_err(&hdev->pdev->dev,
4582                                 "Configure RSS config fail, status = %d\n",
4583                                 ret);
4584                         return ret;
4585                 }
4586         }
4587         return 0;
4588 }
4589
4590 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4591 {
4592         struct hclge_rss_indirection_table_cmd *req;
4593         struct hclge_desc desc;
4594         int rss_cfg_tbl_num;
4595         u8 rss_msb_oft;
4596         u8 rss_msb_val;
4597         int ret;
4598         u16 qid;
4599         int i;
4600         u32 j;
4601
4602         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4603         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4604                           HCLGE_RSS_CFG_TBL_SIZE;
4605
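             /* Each queue id in the indirection table is split: the low byte
              * goes into rss_qid_l[], and the remaining MSB (queue ids above
              * 255, assuming HCLGE_RSS_CFG_TBL_BW_L is 8) is packed bit by bit
              * into rss_qid_h[].
              */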
4606         for (i = 0; i < rss_cfg_tbl_num; i++) {
4607                 hclge_cmd_setup_basic_desc
4608                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4609
4610                 req->start_table_index =
4611                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4612                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4613                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4614                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4615                         req->rss_qid_l[j] = qid & 0xff;
4616                         rss_msb_oft =
4617                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4618                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4619                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4620                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4621                 }
4622                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4623                 if (ret) {
4624                         dev_err(&hdev->pdev->dev,
4625                                 "Configure rss indir table fail, status = %d\n",
4626                                 ret);
4627                         return ret;
4628                 }
4629         }
4630         return 0;
4631 }
4632
4633 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4634                                  u16 *tc_size, u16 *tc_offset)
4635 {
4636         struct hclge_rss_tc_mode_cmd *req;
4637         struct hclge_desc desc;
4638         int ret;
4639         int i;
4640
4641         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4642         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4643
4644         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4645                 u16 mode = 0;
4646
4647                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4648                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4649                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4650                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4651                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4652                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4653                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4654
4655                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4656         }
4657
4658         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4659         if (ret)
4660                 dev_err(&hdev->pdev->dev,
4661                         "Configure rss tc mode fail, status = %d\n", ret);
4662
4663         return ret;
4664 }
4665
4666 static void hclge_get_rss_type(struct hclge_vport *vport)
4667 {
4668         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4669             vport->rss_tuple_sets.ipv4_udp_en ||
4670             vport->rss_tuple_sets.ipv4_sctp_en ||
4671             vport->rss_tuple_sets.ipv6_tcp_en ||
4672             vport->rss_tuple_sets.ipv6_udp_en ||
4673             vport->rss_tuple_sets.ipv6_sctp_en)
4674                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4675         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4676                  vport->rss_tuple_sets.ipv6_fragment_en)
4677                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4678         else
4679                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4680 }
4681
4682 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4683 {
4684         struct hclge_rss_input_tuple_cmd *req;
4685         struct hclge_desc desc;
4686         int ret;
4687
4688         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4689
4690         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4691
4692         /* Get the tuple cfg from pf */
4693         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4694         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4695         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4696         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4697         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4698         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4699         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4700         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4701         hclge_get_rss_type(&hdev->vport[0]);
4702         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4703         if (ret)
4704                 dev_err(&hdev->pdev->dev,
4705                         "Configure rss input fail, status = %d\n", ret);
4706         return ret;
4707 }
4708
4709 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4710                          u8 *key, u8 *hfunc)
4711 {
4712         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4713         struct hclge_vport *vport = hclge_get_vport(handle);
4714         int i;
4715
4716         /* Get hash algorithm */
4717         if (hfunc) {
4718                 switch (vport->rss_algo) {
4719                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4720                         *hfunc = ETH_RSS_HASH_TOP;
4721                         break;
4722                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4723                         *hfunc = ETH_RSS_HASH_XOR;
4724                         break;
4725                 default:
4726                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4727                         break;
4728                 }
4729         }
4730
4731         /* Get the RSS Key required by the user */
4732         if (key)
4733                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4734
4735         /* Get indirect table */
4736         if (indir)
4737                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4738                         indir[i] =  vport->rss_indirection_tbl[i];
4739
4740         return 0;
4741 }
4742
4743 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4744                          const  u8 *key, const  u8 hfunc)
4745 {
4746         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4747         struct hclge_vport *vport = hclge_get_vport(handle);
4748         struct hclge_dev *hdev = vport->back;
4749         u8 hash_algo;
4750         int ret, i;
4751
4752         /* Set the RSS Hash Key if specified by the user */
4753         if (key) {
4754                 switch (hfunc) {
4755                 case ETH_RSS_HASH_TOP:
4756                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4757                         break;
4758                 case ETH_RSS_HASH_XOR:
4759                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4760                         break;
4761                 case ETH_RSS_HASH_NO_CHANGE:
4762                         hash_algo = vport->rss_algo;
4763                         break;
4764                 default:
4765                         return -EINVAL;
4766                 }
4767
4768                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4769                 if (ret)
4770                         return ret;
4771
4772                 /* Update the shadow RSS key with the user specified key */
4773                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4774                 vport->rss_algo = hash_algo;
4775         }
4776
4777         /* Update the shadow RSS table with user specified qids */
4778         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4779                 vport->rss_indirection_tbl[i] = indir[i];
4780
4781         /* Update the hardware */
4782         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4783 }
4784
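     /* Translate the ethtool RXH_* hash fields into the driver's tuple bits.
      * For example, nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
      * RXH_L4_B_2_3 selects source/destination IP and source/destination port;
      * SCTP flows additionally hash on the verification tag.
      */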
4785 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4786 {
4787         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4788
4789         if (nfc->data & RXH_L4_B_2_3)
4790                 hash_sets |= HCLGE_D_PORT_BIT;
4791         else
4792                 hash_sets &= ~HCLGE_D_PORT_BIT;
4793
4794         if (nfc->data & RXH_IP_SRC)
4795                 hash_sets |= HCLGE_S_IP_BIT;
4796         else
4797                 hash_sets &= ~HCLGE_S_IP_BIT;
4798
4799         if (nfc->data & RXH_IP_DST)
4800                 hash_sets |= HCLGE_D_IP_BIT;
4801         else
4802                 hash_sets &= ~HCLGE_D_IP_BIT;
4803
4804         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4805                 hash_sets |= HCLGE_V_TAG_BIT;
4806
4807         return hash_sets;
4808 }
4809
4810 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4811                                     struct ethtool_rxnfc *nfc,
4812                                     struct hclge_rss_input_tuple_cmd *req)
4813 {
4814         struct hclge_dev *hdev = vport->back;
4815         u8 tuple_sets;
4816
4817         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4818         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4819         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4820         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4821         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4822         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4823         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4824         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4825
4826         tuple_sets = hclge_get_rss_hash_bits(nfc);
4827         switch (nfc->flow_type) {
4828         case TCP_V4_FLOW:
4829                 req->ipv4_tcp_en = tuple_sets;
4830                 break;
4831         case TCP_V6_FLOW:
4832                 req->ipv6_tcp_en = tuple_sets;
4833                 break;
4834         case UDP_V4_FLOW:
4835                 req->ipv4_udp_en = tuple_sets;
4836                 break;
4837         case UDP_V6_FLOW:
4838                 req->ipv6_udp_en = tuple_sets;
4839                 break;
4840         case SCTP_V4_FLOW:
4841                 req->ipv4_sctp_en = tuple_sets;
4842                 break;
4843         case SCTP_V6_FLOW:
4844                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4845                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4846                         return -EINVAL;
4847
4848                 req->ipv6_sctp_en = tuple_sets;
4849                 break;
4850         case IPV4_FLOW:
4851                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4852                 break;
4853         case IPV6_FLOW:
4854                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4855                 break;
4856         default:
4857                 return -EINVAL;
4858         }
4859
4860         return 0;
4861 }
4862
4863 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4864                                struct ethtool_rxnfc *nfc)
4865 {
4866         struct hclge_vport *vport = hclge_get_vport(handle);
4867         struct hclge_dev *hdev = vport->back;
4868         struct hclge_rss_input_tuple_cmd *req;
4869         struct hclge_desc desc;
4870         int ret;
4871
4872         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4873                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4874                 return -EINVAL;
4875
4876         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4877         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4878
4879         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4880         if (ret) {
4881                 dev_err(&hdev->pdev->dev,
4882                         "failed to init rss tuple cmd, ret = %d\n", ret);
4883                 return ret;
4884         }
4885
4886         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4887         if (ret) {
4888                 dev_err(&hdev->pdev->dev,
4889                         "Set rss tuple fail, status = %d\n", ret);
4890                 return ret;
4891         }
4892
4893         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4894         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4895         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4896         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4897         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4898         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4899         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4900         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4901         hclge_get_rss_type(vport);
4902         return 0;
4903 }
4904
4905 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4906                                      u8 *tuple_sets)
4907 {
4908         switch (flow_type) {
4909         case TCP_V4_FLOW:
4910                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4911                 break;
4912         case UDP_V4_FLOW:
4913                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4914                 break;
4915         case TCP_V6_FLOW:
4916                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4917                 break;
4918         case UDP_V6_FLOW:
4919                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4920                 break;
4921         case SCTP_V4_FLOW:
4922                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4923                 break;
4924         case SCTP_V6_FLOW:
4925                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4926                 break;
4927         case IPV4_FLOW:
4928         case IPV6_FLOW:
4929                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4930                 break;
4931         default:
4932                 return -EINVAL;
4933         }
4934
4935         return 0;
4936 }
4937
4938 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4939 {
4940         u64 tuple_data = 0;
4941
4942         if (tuple_sets & HCLGE_D_PORT_BIT)
4943                 tuple_data |= RXH_L4_B_2_3;
4944         if (tuple_sets & HCLGE_S_PORT_BIT)
4945                 tuple_data |= RXH_L4_B_0_1;
4946         if (tuple_sets & HCLGE_D_IP_BIT)
4947                 tuple_data |= RXH_IP_DST;
4948         if (tuple_sets & HCLGE_S_IP_BIT)
4949                 tuple_data |= RXH_IP_SRC;
4950
4951         return tuple_data;
4952 }
4953
4954 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4955                                struct ethtool_rxnfc *nfc)
4956 {
4957         struct hclge_vport *vport = hclge_get_vport(handle);
4958         u8 tuple_sets;
4959         int ret;
4960
4961         nfc->data = 0;
4962
4963         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4964         if (ret || !tuple_sets)
4965                 return ret;
4966
4967         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4968
4969         return 0;
4970 }
4971
4972 static int hclge_get_tc_size(struct hnae3_handle *handle)
4973 {
4974         struct hclge_vport *vport = hclge_get_vport(handle);
4975         struct hclge_dev *hdev = vport->back;
4976
4977         return hdev->pf_rss_size_max;
4978 }
4979
4980 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4981 {
4982         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4983         struct hclge_vport *vport = hdev->vport;
4984         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4985         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4986         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4987         struct hnae3_tc_info *tc_info;
4988         u16 roundup_size;
4989         u16 rss_size;
4990         int i;
4991
4992         tc_info = &vport->nic.kinfo.tc_info;
4993         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4994                 rss_size = tc_info->tqp_count[i];
4995                 tc_valid[i] = 0;
4996
4997                 if (!(hdev->hw_tc_map & BIT(i)))
4998                         continue;
4999
5000                 /* The tc_size written to hardware is the log2 of rss_size
5001                  * rounded up to a power of two; the actual queue size is
5002                  * limited by the indirection table.
5003                  */
5004                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5005                     rss_size == 0) {
5006                         dev_err(&hdev->pdev->dev,
5007                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5008                                 rss_size);
5009                         return -EINVAL;
5010                 }
5011
5012                 roundup_size = roundup_pow_of_two(rss_size);
5013                 roundup_size = ilog2(roundup_size);
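                     /* e.g. rss_size = 100: roundup_pow_of_two(100) = 128, so
                      * tc_size = ilog2(128) = 7
                      */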
5014
5015                 tc_valid[i] = 1;
5016                 tc_size[i] = roundup_size;
5017                 tc_offset[i] = tc_info->tqp_offset[i];
5018         }
5019
5020         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5021 }
5022
5023 int hclge_rss_init_hw(struct hclge_dev *hdev)
5024 {
5025         struct hclge_vport *vport = hdev->vport;
5026         u16 *rss_indir = vport[0].rss_indirection_tbl;
5027         u8 *key = vport[0].rss_hash_key;
5028         u8 hfunc = vport[0].rss_algo;
5029         int ret;
5030
5031         ret = hclge_set_rss_indir_table(hdev, rss_indir);
5032         if (ret)
5033                 return ret;
5034
5035         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5036         if (ret)
5037                 return ret;
5038
5039         ret = hclge_set_rss_input_tuple(hdev);
5040         if (ret)
5041                 return ret;
5042
5043         return hclge_init_rss_tc_mode(hdev);
5044 }
5045
5046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5047 {
5048         struct hclge_vport *vport = &hdev->vport[0];
5049         int i;
5050
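             /* e.g. with alloc_rss_size = 4 the table becomes 0, 1, 2, 3,
              * 0, 1, 2, 3, ... so flows are spread evenly across the
              * allocated queues.
              */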
5051         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5052                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5053 }
5054
5055 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5056 {
5057         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5058         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5059         struct hclge_vport *vport = &hdev->vport[0];
5060         u16 *rss_ind_tbl;
5061
5062         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5063                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5064
5065         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5066         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5067         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5068         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5069         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5070         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5071         vport->rss_tuple_sets.ipv6_sctp_en =
5072                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5073                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5074                 HCLGE_RSS_INPUT_TUPLE_SCTP;
5075         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5076
5077         vport->rss_algo = rss_algo;
5078
5079         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5080                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
5081         if (!rss_ind_tbl)
5082                 return -ENOMEM;
5083
5084         vport->rss_indirection_tbl = rss_ind_tbl;
5085         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5086
5087         hclge_rss_indir_init_cfg(hdev);
5088
5089         return 0;
5090 }
5091
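     /* Ring-to-vector mappings are written in chunks of
      * HCLGE_VECTOR_ELEMENTS_PER_CMD entries: when a chunk fills up, the
      * descriptor is sent and re-initialized for the remaining rings in the
      * chain, and any partially filled chunk is flushed at the end.
      */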
5092 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5093                                 int vector_id, bool en,
5094                                 struct hnae3_ring_chain_node *ring_chain)
5095 {
5096         struct hclge_dev *hdev = vport->back;
5097         struct hnae3_ring_chain_node *node;
5098         struct hclge_desc desc;
5099         struct hclge_ctrl_vector_chain_cmd *req =
5100                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5101         enum hclge_cmd_status status;
5102         enum hclge_opcode_type op;
5103         u16 tqp_type_and_id;
5104         int i;
5105
5106         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5107         hclge_cmd_setup_basic_desc(&desc, op, false);
5108         req->int_vector_id_l = hnae3_get_field(vector_id,
5109                                                HCLGE_VECTOR_ID_L_M,
5110                                                HCLGE_VECTOR_ID_L_S);
5111         req->int_vector_id_h = hnae3_get_field(vector_id,
5112                                                HCLGE_VECTOR_ID_H_M,
5113                                                HCLGE_VECTOR_ID_H_S);
5114
5115         i = 0;
5116         for (node = ring_chain; node; node = node->next) {
5117                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5118                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5119                                 HCLGE_INT_TYPE_S,
5120                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5121                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5122                                 HCLGE_TQP_ID_S, node->tqp_index);
5123                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5124                                 HCLGE_INT_GL_IDX_S,
5125                                 hnae3_get_field(node->int_gl_idx,
5126                                                 HNAE3_RING_GL_IDX_M,
5127                                                 HNAE3_RING_GL_IDX_S));
5128                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5129                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5130                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5131                         req->vfid = vport->vport_id;
5132
5133                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5134                         if (status) {
5135                                 dev_err(&hdev->pdev->dev,
5136                                         "Map TQP fail, status is %d.\n",
5137                                         status);
5138                                 return -EIO;
5139                         }
5140                         i = 0;
5141
5142                         hclge_cmd_setup_basic_desc(&desc,
5143                                                    op,
5144                                                    false);
5145                         req->int_vector_id_l =
5146                                 hnae3_get_field(vector_id,
5147                                                 HCLGE_VECTOR_ID_L_M,
5148                                                 HCLGE_VECTOR_ID_L_S);
5149                         req->int_vector_id_h =
5150                                 hnae3_get_field(vector_id,
5151                                                 HCLGE_VECTOR_ID_H_M,
5152                                                 HCLGE_VECTOR_ID_H_S);
5153                 }
5154         }
5155
5156         if (i > 0) {
5157                 req->int_cause_num = i;
5158                 req->vfid = vport->vport_id;
5159                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5160                 if (status) {
5161                         dev_err(&hdev->pdev->dev,
5162                                 "Map TQP fail, status is %d.\n", status);
5163                         return -EIO;
5164                 }
5165         }
5166
5167         return 0;
5168 }
5169
5170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5171                                     struct hnae3_ring_chain_node *ring_chain)
5172 {
5173         struct hclge_vport *vport = hclge_get_vport(handle);
5174         struct hclge_dev *hdev = vport->back;
5175         int vector_id;
5176
5177         vector_id = hclge_get_vector_index(hdev, vector);
5178         if (vector_id < 0) {
5179                 dev_err(&hdev->pdev->dev,
5180                         "failed to get vector index. vector=%d\n", vector);
5181                 return vector_id;
5182         }
5183
5184         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5185 }
5186
5187 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5188                                        struct hnae3_ring_chain_node *ring_chain)
5189 {
5190         struct hclge_vport *vport = hclge_get_vport(handle);
5191         struct hclge_dev *hdev = vport->back;
5192         int vector_id, ret;
5193
5194         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5195                 return 0;
5196
5197         vector_id = hclge_get_vector_index(hdev, vector);
5198         if (vector_id < 0) {
5199                 dev_err(&handle->pdev->dev,
5200                         "Get vector index fail. ret =%d\n", vector_id);
5201                 return vector_id;
5202         }
5203
5204         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5205         if (ret)
5206                 dev_err(&handle->pdev->dev,
5207                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5208                         vector_id, ret);
5209
5210         return ret;
5211 }
5212
5213 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5214                                       bool en_uc, bool en_mc, bool en_bc)
5215 {
5216         struct hclge_vport *vport = &hdev->vport[vf_id];
5217         struct hnae3_handle *handle = &vport->nic;
5218         struct hclge_promisc_cfg_cmd *req;
5219         struct hclge_desc desc;
5220         bool uc_tx_en = en_uc;
5221         u8 promisc_cfg = 0;
5222         int ret;
5223
5224         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5225
5226         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5227         req->vf_id = vf_id;
5228
5229         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5230                 uc_tx_en = false;
5231
5232         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5233         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5234         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5235         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5236         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5237         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5238         req->extend_promisc = promisc_cfg;
5239
5240         /* to be compatible with DEVICE_VERSION_V1/2 */
5241         promisc_cfg = 0;
5242         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5243         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5244         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5245         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5246         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5247         req->promisc = promisc_cfg;
5248
5249         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5250         if (ret)
5251                 dev_err(&hdev->pdev->dev,
5252                         "failed to set vport %u promisc mode, ret = %d.\n",
5253                         vf_id, ret);
5254
5255         return ret;
5256 }
5257
5258 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5259                                  bool en_mc_pmc, bool en_bc_pmc)
5260 {
5261         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5262                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5263 }
5264
5265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5266                                   bool en_mc_pmc)
5267 {
5268         struct hclge_vport *vport = hclge_get_vport(handle);
5269         struct hclge_dev *hdev = vport->back;
5270         bool en_bc_pmc = true;
5271
5272         /* For devices whose version is below V2, the vlan filter is always
5273          * bypassed when broadcast promisc is enabled. So broadcast promisc
5274          * should be disabled until the user enables promisc mode.
5275          */
5276         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5277                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5278
5279         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5280                                             en_bc_pmc);
5281 }
5282
5283 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5284 {
5285         struct hclge_vport *vport = hclge_get_vport(handle);
5286
5287         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5288 }
5289
5290 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5291 {
5292         if (hlist_empty(&hdev->fd_rule_list))
5293                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5294 }
5295
5296 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5297 {
5298         if (!test_bit(location, hdev->fd_bmap)) {
5299                 set_bit(location, hdev->fd_bmap);
5300                 hdev->hclge_fd_rule_num++;
5301         }
5302 }
5303
5304 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5305 {
5306         if (test_bit(location, hdev->fd_bmap)) {
5307                 clear_bit(location, hdev->fd_bmap);
5308                 hdev->hclge_fd_rule_num--;
5309         }
5310 }
5311
5312 static void hclge_fd_free_node(struct hclge_dev *hdev,
5313                                struct hclge_fd_rule *rule)
5314 {
5315         hlist_del(&rule->rule_node);
5316         kfree(rule);
5317         hclge_sync_fd_state(hdev);
5318 }
5319
5320 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5321                                       struct hclge_fd_rule *old_rule,
5322                                       struct hclge_fd_rule *new_rule,
5323                                       enum HCLGE_FD_NODE_STATE state)
5324 {
5325         switch (state) {
5326         case HCLGE_FD_TO_ADD:
5327         case HCLGE_FD_ACTIVE:
5328                 /* 1) if the new state is TO_ADD, just replace the old rule
5329                  * at the same location, no matter its state, because the
5330                  * new rule will be configured to the hardware.
5331                  * 2) if the new state is ACTIVE, the new rule has already
5332                  * been configured to the hardware, so just replace the old
5333                  * rule node at the same location.
5334                  * 3) neither case adds a new node to the list, so there is
5335                  * no need to update the rule number or fd_bmap.
5336                  */
5337                 new_rule->rule_node.next = old_rule->rule_node.next;
5338                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5339                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5340                 kfree(new_rule);
5341                 break;
5342         case HCLGE_FD_DELETED:
5343                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5344                 hclge_fd_free_node(hdev, old_rule);
5345                 break;
5346         case HCLGE_FD_TO_DEL:
5347                 /* if the new request is TO_DEL and the old rule exists:
5348                  * 1) the state of the old rule is TO_DEL: nothing to do,
5349                  * because we delete the rule by location and the rest of
5350                  * the rule content is irrelevant.
5351                  * 2) the state of the old rule is ACTIVE: change its state
5352                  * to TO_DEL, so the rule will be deleted when the periodic
5353                  * task is scheduled.
5354                  * 3) the state of the old rule is TO_ADD: the rule has not
5355                  * yet been added to hardware, so just delete the rule node
5356                  * from fd_rule_list directly.
5357                  */
5358                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5359                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5360                         hclge_fd_free_node(hdev, old_rule);
5361                         return;
5362                 }
5363                 old_rule->state = HCLGE_FD_TO_DEL;
5364                 break;
5365         }
5366 }
5367
5368 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5369                                                 u16 location,
5370                                                 struct hclge_fd_rule **parent)
5371 {
5372         struct hclge_fd_rule *rule;
5373         struct hlist_node *node;
5374
5375         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5376                 if (rule->location == location)
5377                         return rule;
5378                 else if (rule->location > location)
5379                         return NULL;
5380                 /* record the parent node, used to keep the nodes in
5381                  * fd_rule_list in ascending order.
5382                  */
5383                 *parent = rule;
5384         }
5385
5386         return NULL;
5387 }
5388
5389 /* insert the fd rule node in ascending order according to rule->location */
5390 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5391                                       struct hclge_fd_rule *rule,
5392                                       struct hclge_fd_rule *parent)
5393 {
5394         INIT_HLIST_NODE(&rule->rule_node);
5395
5396         if (parent)
5397                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5398         else
5399                 hlist_add_head(&rule->rule_node, hlist);
5400 }
5401
5402 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5403                                      struct hclge_fd_user_def_cfg *cfg)
5404 {
5405         struct hclge_fd_user_def_cfg_cmd *req;
5406         struct hclge_desc desc;
5407         u16 data = 0;
5408         int ret;
5409
5410         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5411
5412         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5413
5414         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5415         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5416                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5417         req->ol2_cfg = cpu_to_le16(data);
5418
5419         data = 0;
5420         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5421         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5422                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5423         req->ol3_cfg = cpu_to_le16(data);
5424
5425         data = 0;
5426         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5427         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5428                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5429         req->ol4_cfg = cpu_to_le16(data);
5430
5431         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5432         if (ret)
5433                 dev_err(&hdev->pdev->dev,
5434                         "failed to set fd user def data, ret= %d\n", ret);
5435         return ret;
5436 }
5437
5438 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5439 {
5440         int ret;
5441
5442         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5443                 return;
5444
5445         if (!locked)
5446                 spin_lock_bh(&hdev->fd_rule_lock);
5447
5448         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5449         if (ret)
5450                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5451
5452         if (!locked)
5453                 spin_unlock_bh(&hdev->fd_rule_lock);
5454 }
5455
5456 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5457                                           struct hclge_fd_rule *rule)
5458 {
5459         struct hlist_head *hlist = &hdev->fd_rule_list;
5460         struct hclge_fd_rule *fd_rule, *parent = NULL;
5461         struct hclge_fd_user_def_info *info, *old_info;
5462         struct hclge_fd_user_def_cfg *cfg;
5463
5464         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5465             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5466                 return 0;
5467
5468         /* valid layers start from 1, so subtract 1 to index the cfg array */
5469         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5470         info = &rule->ep.user_def;
5471
5472         if (!cfg->ref_cnt || cfg->offset == info->offset)
5473                 return 0;
5474
5475         if (cfg->ref_cnt > 1)
5476                 goto error;
5477
5478         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5479         if (fd_rule) {
5480                 old_info = &fd_rule->ep.user_def;
5481                 if (info->layer == old_info->layer)
5482                         return 0;
5483         }
5484
5485 error:
5486         dev_err(&hdev->pdev->dev,
5487                 "No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5488                 info->layer + 1);
5489         return -ENOSPC;
5490 }
5491
5492 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5493                                          struct hclge_fd_rule *rule)
5494 {
5495         struct hclge_fd_user_def_cfg *cfg;
5496
5497         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5498             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5499                 return;
5500
5501         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5502         if (!cfg->ref_cnt) {
5503                 cfg->offset = rule->ep.user_def.offset;
5504                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5505         }
5506         cfg->ref_cnt++;
5507 }
5508
5509 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5510                                          struct hclge_fd_rule *rule)
5511 {
5512         struct hclge_fd_user_def_cfg *cfg;
5513
5514         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5515             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5516                 return;
5517
5518         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5519         if (!cfg->ref_cnt)
5520                 return;
5521
5522         cfg->ref_cnt--;
5523         if (!cfg->ref_cnt) {
5524                 cfg->offset = 0;
5525                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5526         }
5527 }
5528
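     /* Keep fd_rule_list and the user-def refcounts in sync with the requested
      * state transition: an existing node is replaced, marked TO_DEL or freed
      * by hclge_update_fd_rule_node(); a brand new rule is inserted in
      * ascending location order and, if it still has to be written to hardware
      * (TO_ADD), the periodic task is scheduled.
      */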
5529 static void hclge_update_fd_list(struct hclge_dev *hdev,
5530                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5531                                  struct hclge_fd_rule *new_rule)
5532 {
5533         struct hlist_head *hlist = &hdev->fd_rule_list;
5534         struct hclge_fd_rule *fd_rule, *parent = NULL;
5535
5536         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5537         if (fd_rule) {
5538                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5539                 if (state == HCLGE_FD_ACTIVE)
5540                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5541                 hclge_sync_fd_user_def_cfg(hdev, true);
5542
5543                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5544                 return;
5545         }
5546
5547         /* it's unlikely to fail here, because we have already checked that
5548          * the rule exists.
5549          */
5550         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5551                 dev_warn(&hdev->pdev->dev,
5552                          "failed to delete fd rule %u, it does not exist\n",
5553                          location);
5554                 return;
5555         }
5556
5557         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5558         hclge_sync_fd_user_def_cfg(hdev, true);
5559
5560         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5561         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5562
5563         if (state == HCLGE_FD_TO_ADD) {
5564                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5565                 hclge_task_schedule(hdev, 0);
5566         }
5567 }
5568
5569 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5570 {
5571         struct hclge_get_fd_mode_cmd *req;
5572         struct hclge_desc desc;
5573         int ret;
5574
5575         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5576
5577         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5578
5579         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5580         if (ret) {
5581                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5582                 return ret;
5583         }
5584
5585         *fd_mode = req->mode;
5586
5587         return ret;
5588 }
5589
5590 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5591                                    u32 *stage1_entry_num,
5592                                    u32 *stage2_entry_num,
5593                                    u16 *stage1_counter_num,
5594                                    u16 *stage2_counter_num)
5595 {
5596         struct hclge_get_fd_allocation_cmd *req;
5597         struct hclge_desc desc;
5598         int ret;
5599
5600         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5601
5602         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5603
5604         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5605         if (ret) {
5606                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5607                         ret);
5608                 return ret;
5609         }
5610
5611         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5612         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5613         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5614         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5615
5616         return ret;
5617 }
5618
5619 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5620                                    enum HCLGE_FD_STAGE stage_num)
5621 {
5622         struct hclge_set_fd_key_config_cmd *req;
5623         struct hclge_fd_key_cfg *stage;
5624         struct hclge_desc desc;
5625         int ret;
5626
5627         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5628
5629         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5630         stage = &hdev->fd_cfg.key_cfg[stage_num];
5631         req->stage = stage_num;
5632         req->key_select = stage->key_sel;
5633         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5634         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5635         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5636         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5637         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5638         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5639
5640         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5641         if (ret)
5642                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5643
5644         return ret;
5645 }
5646
5647 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5648 {
5649         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5650
5651         spin_lock_bh(&hdev->fd_rule_lock);
5652         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5653         spin_unlock_bh(&hdev->fd_rule_lock);
5654
5655         hclge_fd_set_user_def_cmd(hdev, cfg);
5656 }
5657
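/* Query the flow director mode and the entry/counter allocation from
 * firmware, then set up the stage 1 key configuration: the active tuples,
 * the meta data fields (roce_type and dst_vport) and, in the 400-bit key
 * mode, the MAC tuples plus the user-def tuples on V3 and newer devices.
 */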
5658 static int hclge_init_fd_config(struct hclge_dev *hdev)
5659 {
5660 #define LOW_2_WORDS             0x03
5661         struct hclge_fd_key_cfg *key_cfg;
5662         int ret;
5663
5664         if (!hnae3_dev_fd_supported(hdev))
5665                 return 0;
5666
5667         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5668         if (ret)
5669                 return ret;
5670
5671         switch (hdev->fd_cfg.fd_mode) {
5672         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5673                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5674                 break;
5675         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5676                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5677                 break;
5678         default:
5679                 dev_err(&hdev->pdev->dev,
5680                         "Unsupported flow director mode %u\n",
5681                         hdev->fd_cfg.fd_mode);
5682                 return -EOPNOTSUPP;
5683         }
5684
5685         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5686         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5687         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5688         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5689         key_cfg->outer_sipv6_word_en = 0;
5690         key_cfg->outer_dipv6_word_en = 0;
5691
5692         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5693                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5694                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5695                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5696
5697         /* If using the max 400-bit key, tuples for ether type can also be supported */
5698         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5699                 key_cfg->tuple_active |=
5700                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5701                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5702                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5703         }
5704
5705         /* roce_type is used to filter roce frames
5706          * dst_vport is used to specify the rule
5707          */
5708         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5709
5710         ret = hclge_get_fd_allocation(hdev,
5711                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5712                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5713                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5714                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5715         if (ret)
5716                 return ret;
5717
5718         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5719 }
5720
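/* Write one TCAM entry (either the x or the y key, selected by @sel_x)
 * using three command descriptors. @key may be NULL when only the entry
 * valid bit needs to be set or cleared, e.g. when deleting a rule.
 */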
5721 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5722                                 int loc, u8 *key, bool is_add)
5723 {
5724         struct hclge_fd_tcam_config_1_cmd *req1;
5725         struct hclge_fd_tcam_config_2_cmd *req2;
5726         struct hclge_fd_tcam_config_3_cmd *req3;
5727         struct hclge_desc desc[3];
5728         int ret;
5729
5730         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5731         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5732         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5733         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5734         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5735
5736         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5737         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5738         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5739
5740         req1->stage = stage;
5741         req1->xy_sel = sel_x ? 1 : 0;
5742         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5743         req1->index = cpu_to_le32(loc);
5744         req1->entry_vld = sel_x ? is_add : 0;
5745
5746         if (key) {
5747                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5748                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5749                        sizeof(req2->tcam_data));
5750                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5751                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5752         }
5753
5754         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5755         if (ret)
5756                 dev_err(&hdev->pdev->dev,
5757                         "config tcam key fail, ret=%d\n",
5758                         ret);
5759
5760         return ret;
5761 }
5762
5763 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5764                               struct hclge_fd_ad_data *action)
5765 {
5766         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5767         struct hclge_fd_ad_config_cmd *req;
5768         struct hclge_desc desc;
5769         u64 ad_data = 0;
5770         int ret;
5771
5772         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5773
5774         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5775         req->index = cpu_to_le32(loc);
5776         req->stage = stage;
5777
5778         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5779                       action->write_rule_id_to_bd);
5780         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5781                         action->rule_id);
5782         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5783                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5784                               action->override_tc);
5785                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5786                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5787         }
5788         ad_data <<= 32;
5789         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5790         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5791                       action->forward_to_direct_queue);
5792         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5793                         action->queue_id);
5794         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5795         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5796                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5797         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5798         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5799                         action->next_input_key);
5800
5801         req->ad_data = cpu_to_le64(ad_data);
5802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5803         if (ret)
5804                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5805
5806         return ret;
5807 }
5808
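/* Convert one tuple of the rule into its TCAM x/y key representation
 * according to the tuple's key option (u8, le16, le32, MAC or IP).
 * Returns true if the tuple was unused or converted successfully, false
 * for an unknown key option.
 */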
5809 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5810                                    struct hclge_fd_rule *rule)
5811 {
5812         int offset, moffset, ip_offset;
5813         enum HCLGE_FD_KEY_OPT key_opt;
5814         u16 tmp_x_s, tmp_y_s;
5815         u32 tmp_x_l, tmp_y_l;
5816         u8 *p = (u8 *)rule;
5817         int i;
5818
5819         if (rule->unused_tuple & BIT(tuple_bit))
5820                 return true;
5821
5822         key_opt = tuple_key_info[tuple_bit].key_opt;
5823         offset = tuple_key_info[tuple_bit].offset;
5824         moffset = tuple_key_info[tuple_bit].moffset;
5825
5826         switch (key_opt) {
5827         case KEY_OPT_U8:
5828                 calc_x(*key_x, p[offset], p[moffset]);
5829                 calc_y(*key_y, p[offset], p[moffset]);
5830
5831                 return true;
5832         case KEY_OPT_LE16:
5833                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5834                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5835                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5836                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5837
5838                 return true;
5839         case KEY_OPT_LE32:
5840                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5841                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5842                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5843                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5844
5845                 return true;
5846         case KEY_OPT_MAC:
5847                 for (i = 0; i < ETH_ALEN; i++) {
5848                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5849                                p[moffset + i]);
5850                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5851                                p[moffset + i]);
5852                 }
5853
5854                 return true;
5855         case KEY_OPT_IP:
5856                 ip_offset = IPV4_INDEX * sizeof(u32);
5857                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5858                        *(u32 *)(&p[moffset + ip_offset]));
5859                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5860                        *(u32 *)(&p[moffset + ip_offset]));
5861                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5862                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5863
5864                 return true;
5865         default:
5866                 return false;
5867         }
5868 }
5869
5870 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5871                                  u8 vf_id, u8 network_port_id)
5872 {
5873         u32 port_number = 0;
5874
5875         if (port_type == HOST_PORT) {
5876                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5877                                 pf_id);
5878                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5879                                 vf_id);
5880                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5881         } else {
5882                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5883                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5884                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5885         }
5886
5887         return port_number;
5888 }
5889
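/* Pack the active meta data fields (the packet type for ROCE_TYPE and the
 * destination port number for DST_VPORT) into a 32-bit word, convert it to
 * the x/y key form and left-shift it so that the used bits occupy the MSBs
 * of the meta data region.
 */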
5890 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5891                                        __le32 *key_x, __le32 *key_y,
5892                                        struct hclge_fd_rule *rule)
5893 {
5894         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5895         u8 cur_pos = 0, tuple_size, shift_bits;
5896         unsigned int i;
5897
5898         for (i = 0; i < MAX_META_DATA; i++) {
5899                 tuple_size = meta_data_key_info[i].key_length;
5900                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5901
5902                 switch (tuple_bit) {
5903                 case BIT(ROCE_TYPE):
5904                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5905                         cur_pos += tuple_size;
5906                         break;
5907                 case BIT(DST_VPORT):
5908                         port_number = hclge_get_port_number(HOST_PORT, 0,
5909                                                             rule->vf_id, 0);
5910                         hnae3_set_field(meta_data,
5911                                         GENMASK(cur_pos + tuple_size, cur_pos),
5912                                         cur_pos, port_number);
5913                         cur_pos += tuple_size;
5914                         break;
5915                 default:
5916                         break;
5917                 }
5918         }
5919
5920         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5921         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5922         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5923
5924         *key_x = cpu_to_le32(tmp_x << shift_bits);
5925         *key_y = cpu_to_le32(tmp_y << shift_bits);
5926 }
5927
5928 /* A complete key consists of a meta data key and a tuple key.
5929  * The meta data key is stored in the MSB region, the tuple key is stored in
5930  * the LSB region, and unused bits are filled with 0.
5931  */
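/* For example, the key buffers below are MAX_KEY_BYTES long: the tuple
 * keys are packed from offset 0 upwards, while the meta data key (a 32-bit
 * word) is written at max_key_length / 8 - MAX_META_DATA_LENGTH / 8, i.e.
 * the meta_data_region computed in hclge_config_key().
 */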
5932 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5933                             struct hclge_fd_rule *rule)
5934 {
5935         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5936         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5937         u8 *cur_key_x, *cur_key_y;
5938         u8 meta_data_region;
5939         u8 tuple_size;
5940         int ret;
5941         u32 i;
5942
5943         memset(key_x, 0, sizeof(key_x));
5944         memset(key_y, 0, sizeof(key_y));
5945         cur_key_x = key_x;
5946         cur_key_y = key_y;
5947
5948         for (i = 0; i < MAX_TUPLE; i++) {
5949                 bool tuple_valid;
5950
5951                 tuple_size = tuple_key_info[i].key_length / 8;
5952                 if (!(key_cfg->tuple_active & BIT(i)))
5953                         continue;
5954
5955                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5956                                                      cur_key_y, rule);
5957                 if (tuple_valid) {
5958                         cur_key_x += tuple_size;
5959                         cur_key_y += tuple_size;
5960                 }
5961         }
5962
5963         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5964                         MAX_META_DATA_LENGTH / 8;
5965
5966         hclge_fd_convert_meta_data(key_cfg,
5967                                    (__le32 *)(key_x + meta_data_region),
5968                                    (__le32 *)(key_y + meta_data_region),
5969                                    rule);
5970
5971         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5972                                    true);
5973         if (ret) {
5974                 dev_err(&hdev->pdev->dev,
5975                         "fd key_y config fail, loc=%u, ret=%d\n",
5976                         rule->location, ret);
5977                 return ret;
5978         }
5979
5980         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5981                                    true);
5982         if (ret)
5983                 dev_err(&hdev->pdev->dev,
5984                         "fd key_x config fail, loc=%u, ret=%d\n",
5985                         rule->location, ret);
5986         return ret;
5987 }
5988
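/* Build the action data for a rule: drop the packet, override the TC
 * (using the TC's tqp offset and size), or forward to a direct queue.
 * A counter is used when stage 1 counters are available, and the rule id
 * is always written back to the RX buffer descriptor.
 */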
5989 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5990                                struct hclge_fd_rule *rule)
5991 {
5992         struct hclge_vport *vport = hdev->vport;
5993         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5994         struct hclge_fd_ad_data ad_data;
5995
5996         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5997         ad_data.ad_id = rule->location;
5998
5999         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6000                 ad_data.drop_packet = true;
6001         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6002                 ad_data.override_tc = true;
6003                 ad_data.queue_id =
6004                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6005                 ad_data.tc_size =
6006                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6007         } else {
6008                 ad_data.forward_to_direct_queue = true;
6009                 ad_data.queue_id = rule->queue_id;
6010         }
6011
6012         if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6013                 ad_data.use_counter = true;
6014                 ad_data.counter_id = rule->vf_id %
6015                                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6016         } else {
6017                 ad_data.use_counter = false;
6018                 ad_data.counter_id = 0;
6019         }
6020
6021         ad_data.use_next_stage = false;
6022         ad_data.next_input_key = 0;
6023
6024         ad_data.write_rule_id_to_bd = true;
6025         ad_data.rule_id = rule->location;
6026
6027         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6028 }
6029
6030 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6031                                        u32 *unused_tuple)
6032 {
6033         if (!spec || !unused_tuple)
6034                 return -EINVAL;
6035
6036         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6037
6038         if (!spec->ip4src)
6039                 *unused_tuple |= BIT(INNER_SRC_IP);
6040
6041         if (!spec->ip4dst)
6042                 *unused_tuple |= BIT(INNER_DST_IP);
6043
6044         if (!spec->psrc)
6045                 *unused_tuple |= BIT(INNER_SRC_PORT);
6046
6047         if (!spec->pdst)
6048                 *unused_tuple |= BIT(INNER_DST_PORT);
6049
6050         if (!spec->tos)
6051                 *unused_tuple |= BIT(INNER_IP_TOS);
6052
6053         return 0;
6054 }
6055
6056 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6057                                     u32 *unused_tuple)
6058 {
6059         if (!spec || !unused_tuple)
6060                 return -EINVAL;
6061
6062         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6063                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6064
6065         if (!spec->ip4src)
6066                 *unused_tuple |= BIT(INNER_SRC_IP);
6067
6068         if (!spec->ip4dst)
6069                 *unused_tuple |= BIT(INNER_DST_IP);
6070
6071         if (!spec->tos)
6072                 *unused_tuple |= BIT(INNER_IP_TOS);
6073
6074         if (!spec->proto)
6075                 *unused_tuple |= BIT(INNER_IP_PROTO);
6076
6077         if (spec->l4_4_bytes)
6078                 return -EOPNOTSUPP;
6079
6080         if (spec->ip_ver != ETH_RX_NFC_IP4)
6081                 return -EOPNOTSUPP;
6082
6083         return 0;
6084 }
6085
6086 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6087                                        u32 *unused_tuple)
6088 {
6089         if (!spec || !unused_tuple)
6090                 return -EINVAL;
6091
6092         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6093
6094         /* check whether the src/dst ip address is used */
6095         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6096                 *unused_tuple |= BIT(INNER_SRC_IP);
6097
6098         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6099                 *unused_tuple |= BIT(INNER_DST_IP);
6100
6101         if (!spec->psrc)
6102                 *unused_tuple |= BIT(INNER_SRC_PORT);
6103
6104         if (!spec->pdst)
6105                 *unused_tuple |= BIT(INNER_DST_PORT);
6106
6107         if (!spec->tclass)
6108                 *unused_tuple |= BIT(INNER_IP_TOS);
6109
6110         return 0;
6111 }
6112
6113 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6114                                     u32 *unused_tuple)
6115 {
6116         if (!spec || !unused_tuple)
6117                 return -EINVAL;
6118
6119         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6120                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6121
6122         /* check whether the src/dst ip address is used */
6123         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6124                 *unused_tuple |= BIT(INNER_SRC_IP);
6125
6126         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6127                 *unused_tuple |= BIT(INNER_DST_IP);
6128
6129         if (!spec->l4_proto)
6130                 *unused_tuple |= BIT(INNER_IP_PROTO);
6131
6132         if (!spec->tclass)
6133                 *unused_tuple |= BIT(INNER_IP_TOS);
6134
6135         if (spec->l4_4_bytes)
6136                 return -EOPNOTSUPP;
6137
6138         return 0;
6139 }
6140
6141 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6142 {
6143         if (!spec || !unused_tuple)
6144                 return -EINVAL;
6145
6146         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6147                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6148                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6149
6150         if (is_zero_ether_addr(spec->h_source))
6151                 *unused_tuple |= BIT(INNER_SRC_MAC);
6152
6153         if (is_zero_ether_addr(spec->h_dest))
6154                 *unused_tuple |= BIT(INNER_DST_MAC);
6155
6156         if (!spec->h_proto)
6157                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6158
6159         return 0;
6160 }
6161
6162 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6163                                     struct ethtool_rx_flow_spec *fs,
6164                                     u32 *unused_tuple)
6165 {
6166         if (fs->flow_type & FLOW_EXT) {
6167                 if (fs->h_ext.vlan_etype) {
6168                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6169                         return -EOPNOTSUPP;
6170                 }
6171
6172                 if (!fs->h_ext.vlan_tci)
6173                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6174
6175                 if (fs->m_ext.vlan_tci &&
6176                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6177                         dev_err(&hdev->pdev->dev,
6178                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6179                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6180                         return -EINVAL;
6181                 }
6182         } else {
6183                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6184         }
6185
6186         if (fs->flow_type & FLOW_MAC_EXT) {
6187                 if (hdev->fd_cfg.fd_mode !=
6188                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6189                         dev_err(&hdev->pdev->dev,
6190                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6191                         return -EOPNOTSUPP;
6192                 }
6193
6194                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6195                         *unused_tuple |= BIT(INNER_DST_MAC);
6196                 else
6197                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6198         }
6199
6200         return 0;
6201 }
6202
6203 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6204                                        struct hclge_fd_user_def_info *info)
6205 {
6206         switch (flow_type) {
6207         case ETHER_FLOW:
6208                 info->layer = HCLGE_FD_USER_DEF_L2;
6209                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6210                 break;
6211         case IP_USER_FLOW:
6212         case IPV6_USER_FLOW:
6213                 info->layer = HCLGE_FD_USER_DEF_L3;
6214                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6215                 break;
6216         case TCP_V4_FLOW:
6217         case UDP_V4_FLOW:
6218         case TCP_V6_FLOW:
6219         case UDP_V6_FLOW:
6220                 info->layer = HCLGE_FD_USER_DEF_L4;
6221                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6222                 break;
6223         default:
6224                 return -EOPNOTSUPP;
6225         }
6226
6227         return 0;
6228 }
6229
6230 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6231 {
6232         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6233 }
6234
6235 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6236                                          struct ethtool_rx_flow_spec *fs,
6237                                          u32 *unused_tuple,
6238                                          struct hclge_fd_user_def_info *info)
6239 {
6240         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6241         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6242         u16 data, offset, data_mask, offset_mask;
6243         int ret;
6244
6245         info->layer = HCLGE_FD_USER_DEF_NONE;
6246         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6247
6248         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6249                 return 0;
6250
6251         /* user-def data from ethtool is a 64 bit value; bits 0~15 are used
6252          * for data, and bits 32~47 are used for offset.
6253          */
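        /* For example, assuming ethtool stores the upper 32 bits of the
         * user-def value in data[0] and the lower 32 bits in data[1],
         * "user-def 0x400001234" would request offset 4 with match data
         * 0x1234.
         */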
6254         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6255         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6256         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6257         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6258
6259         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6260                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6261                 return -EOPNOTSUPP;
6262         }
6263
6264         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6265                 dev_err(&hdev->pdev->dev,
6266                         "user-def offset[%u] should be no more than %u\n",
6267                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6268                 return -EINVAL;
6269         }
6270
6271         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6272                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6273                 return -EINVAL;
6274         }
6275
6276         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6277         if (ret) {
6278                 dev_err(&hdev->pdev->dev,
6279                         "unsupported flow type for user-def bytes, ret = %d\n",
6280                         ret);
6281                 return ret;
6282         }
6283
6284         info->data = data;
6285         info->data_mask = data_mask;
6286         info->offset = offset;
6287
6288         return 0;
6289 }
6290
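/* Validate an ethtool flow spec before it is turned into a rule: check the
 * rule location against the stage 1 entry number, parse the user-def
 * field, verify the flow-type specific tuples and the FLOW_EXT /
 * FLOW_MAC_EXT extensions, and collect the unused tuple bits.
 */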
6291 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6292                                struct ethtool_rx_flow_spec *fs,
6293                                u32 *unused_tuple,
6294                                struct hclge_fd_user_def_info *info)
6295 {
6296         u32 flow_type;
6297         int ret;
6298
6299         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6300                 dev_err(&hdev->pdev->dev,
6301                         "failed to config fd rules, invalid rule location: %u, max is %u\n",
6302                         fs->location,
6303                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6304                 return -EINVAL;
6305         }
6306
6307         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6308         if (ret)
6309                 return ret;
6310
6311         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6312         switch (flow_type) {
6313         case SCTP_V4_FLOW:
6314         case TCP_V4_FLOW:
6315         case UDP_V4_FLOW:
6316                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6317                                                   unused_tuple);
6318                 break;
6319         case IP_USER_FLOW:
6320                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6321                                                unused_tuple);
6322                 break;
6323         case SCTP_V6_FLOW:
6324         case TCP_V6_FLOW:
6325         case UDP_V6_FLOW:
6326                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6327                                                   unused_tuple);
6328                 break;
6329         case IPV6_USER_FLOW:
6330                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6331                                                unused_tuple);
6332                 break;
6333         case ETHER_FLOW:
6334                 if (hdev->fd_cfg.fd_mode !=
6335                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6336                         dev_err(&hdev->pdev->dev,
6337                                 "ETHER_FLOW is not supported in current fd mode!\n");
6338                         return -EOPNOTSUPP;
6339                 }
6340
6341                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6342                                                  unused_tuple);
6343                 break;
6344         default:
6345                 dev_err(&hdev->pdev->dev,
6346                         "unsupported protocol type, protocol type = %#x\n",
6347                         flow_type);
6348                 return -EOPNOTSUPP;
6349         }
6350
6351         if (ret) {
6352                 dev_err(&hdev->pdev->dev,
6353                         "failed to check flow union tuple, ret = %d\n",
6354                         ret);
6355                 return ret;
6356         }
6357
6358         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6359 }
6360
6361 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6362                                       struct ethtool_rx_flow_spec *fs,
6363                                       struct hclge_fd_rule *rule, u8 ip_proto)
6364 {
6365         rule->tuples.src_ip[IPV4_INDEX] =
6366                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6367         rule->tuples_mask.src_ip[IPV4_INDEX] =
6368                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6369
6370         rule->tuples.dst_ip[IPV4_INDEX] =
6371                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6372         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6373                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6374
6375         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6376         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6377
6378         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6379         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6380
6381         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6382         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6383
6384         rule->tuples.ether_proto = ETH_P_IP;
6385         rule->tuples_mask.ether_proto = 0xFFFF;
6386
6387         rule->tuples.ip_proto = ip_proto;
6388         rule->tuples_mask.ip_proto = 0xFF;
6389 }
6390
6391 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6392                                    struct ethtool_rx_flow_spec *fs,
6393                                    struct hclge_fd_rule *rule)
6394 {
6395         rule->tuples.src_ip[IPV4_INDEX] =
6396                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6397         rule->tuples_mask.src_ip[IPV4_INDEX] =
6398                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6399
6400         rule->tuples.dst_ip[IPV4_INDEX] =
6401                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6402         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6403                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6404
6405         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6406         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6407
6408         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6409         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6410
6411         rule->tuples.ether_proto = ETH_P_IP;
6412         rule->tuples_mask.ether_proto = 0xFFFF;
6413 }
6414
6415 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6416                                       struct ethtool_rx_flow_spec *fs,
6417                                       struct hclge_fd_rule *rule, u8 ip_proto)
6418 {
6419         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6420                           IPV6_SIZE);
6421         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6422                           IPV6_SIZE);
6423
6424         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6425                           IPV6_SIZE);
6426         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6427                           IPV6_SIZE);
6428
6429         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6430         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6431
6432         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6433         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6434
6435         rule->tuples.ether_proto = ETH_P_IPV6;
6436         rule->tuples_mask.ether_proto = 0xFFFF;
6437
6438         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6439         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6440
6441         rule->tuples.ip_proto = ip_proto;
6442         rule->tuples_mask.ip_proto = 0xFF;
6443 }
6444
6445 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6446                                    struct ethtool_rx_flow_spec *fs,
6447                                    struct hclge_fd_rule *rule)
6448 {
6449         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6450                           IPV6_SIZE);
6451         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6452                           IPV6_SIZE);
6453
6454         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6455                           IPV6_SIZE);
6456         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6457                           IPV6_SIZE);
6458
6459         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6460         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6461
6462         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6463         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6464
6465         rule->tuples.ether_proto = ETH_P_IPV6;
6466         rule->tuples_mask.ether_proto = 0xFFFF;
6467 }
6468
6469 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6470                                      struct ethtool_rx_flow_spec *fs,
6471                                      struct hclge_fd_rule *rule)
6472 {
6473         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6474         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6475
6476         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6477         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6478
6479         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6480         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6481 }
6482
6483 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6484                                         struct hclge_fd_rule *rule)
6485 {
6486         switch (info->layer) {
6487         case HCLGE_FD_USER_DEF_L2:
6488                 rule->tuples.l2_user_def = info->data;
6489                 rule->tuples_mask.l2_user_def = info->data_mask;
6490                 break;
6491         case HCLGE_FD_USER_DEF_L3:
6492                 rule->tuples.l3_user_def = info->data;
6493                 rule->tuples_mask.l3_user_def = info->data_mask;
6494                 break;
6495         case HCLGE_FD_USER_DEF_L4:
6496                 rule->tuples.l4_user_def = (u32)info->data << 16;
6497                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6498                 break;
6499         default:
6500                 break;
6501         }
6502
6503         rule->ep.user_def = *info;
6504 }
6505
6506 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6507                               struct ethtool_rx_flow_spec *fs,
6508                               struct hclge_fd_rule *rule,
6509                               struct hclge_fd_user_def_info *info)
6510 {
6511         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6512
6513         switch (flow_type) {
6514         case SCTP_V4_FLOW:
6515                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6516                 break;
6517         case TCP_V4_FLOW:
6518                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6519                 break;
6520         case UDP_V4_FLOW:
6521                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6522                 break;
6523         case IP_USER_FLOW:
6524                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6525                 break;
6526         case SCTP_V6_FLOW:
6527                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6528                 break;
6529         case TCP_V6_FLOW:
6530                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6531                 break;
6532         case UDP_V6_FLOW:
6533                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6534                 break;
6535         case IPV6_USER_FLOW:
6536                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6537                 break;
6538         case ETHER_FLOW:
6539                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6540                 break;
6541         default:
6542                 return -EOPNOTSUPP;
6543         }
6544
6545         if (fs->flow_type & FLOW_EXT) {
6546                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6547                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6548                 hclge_fd_get_user_def_tuple(info, rule);
6549         }
6550
6551         if (fs->flow_type & FLOW_MAC_EXT) {
6552                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6553                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6554         }
6555
6556         return 0;
6557 }
6558
6559 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6560                                 struct hclge_fd_rule *rule)
6561 {
6562         int ret;
6563
6564         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6565         if (ret)
6566                 return ret;
6567
6568         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6569 }
6570
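/* Program a new rule into hardware under fd_rule_lock: reject it if its
 * rule type conflicts with the currently active type, check the user-def
 * offset refcount, clear any aRFS rules, write the action and key to
 * hardware and finally add the rule to the software list.
 */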
6571 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6572                                      struct hclge_fd_rule *rule)
6573 {
6574         int ret;
6575
6576         spin_lock_bh(&hdev->fd_rule_lock);
6577
6578         if (hdev->fd_active_type != rule->rule_type &&
6579             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6580              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6581                 dev_err(&hdev->pdev->dev,
6582                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6583                         rule->rule_type, hdev->fd_active_type);
6584                 spin_unlock_bh(&hdev->fd_rule_lock);
6585                 return -EINVAL;
6586         }
6587
6588         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6589         if (ret)
6590                 goto out;
6591
6592         ret = hclge_clear_arfs_rules(hdev);
6593         if (ret)
6594                 goto out;
6595
6596         ret = hclge_fd_config_rule(hdev, rule);
6597         if (ret)
6598                 goto out;
6599
6600         rule->state = HCLGE_FD_ACTIVE;
6601         hdev->fd_active_type = rule->rule_type;
6602         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6603
6604 out:
6605         spin_unlock_bh(&hdev->fd_rule_lock);
6606         return ret;
6607 }
6608
6609 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6610 {
6611         struct hclge_vport *vport = hclge_get_vport(handle);
6612         struct hclge_dev *hdev = vport->back;
6613
6614         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6615 }
6616
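/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie selects a VF (0 means the PF itself) and a queue,
 * which are validated against the requested VF number and the VF's tqp
 * count.
 */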
6617 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6618                                       u16 *vport_id, u8 *action, u16 *queue_id)
6619 {
6620         struct hclge_vport *vport = hdev->vport;
6621
6622         if (ring_cookie == RX_CLS_FLOW_DISC) {
6623                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6624         } else {
6625                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6626                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6627                 u16 tqps;
6628
6629                 if (vf > hdev->num_req_vfs) {
6630                         dev_err(&hdev->pdev->dev,
6631                                 "Error: vf id (%u) > max vf num (%u)\n",
6632                                 vf, hdev->num_req_vfs);
6633                         return -EINVAL;
6634                 }
6635
6636                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6637                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6638
6639                 if (ring >= tqps) {
6640                         dev_err(&hdev->pdev->dev,
6641                                 "Error: queue id (%u) > max tqp num (%u)\n",
6642                                 ring, tqps - 1);
6643                         return -EINVAL;
6644                 }
6645
6646                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6647                 *queue_id = ring;
6648         }
6649
6650         return 0;
6651 }
6652
6653 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6654                               struct ethtool_rxnfc *cmd)
6655 {
6656         struct hclge_vport *vport = hclge_get_vport(handle);
6657         struct hclge_dev *hdev = vport->back;
6658         struct hclge_fd_user_def_info info;
6659         u16 dst_vport_id = 0, q_index = 0;
6660         struct ethtool_rx_flow_spec *fs;
6661         struct hclge_fd_rule *rule;
6662         u32 unused = 0;
6663         u8 action;
6664         int ret;
6665
6666         if (!hnae3_dev_fd_supported(hdev)) {
6667                 dev_err(&hdev->pdev->dev,
6668                         "flow director is not supported\n");
6669                 return -EOPNOTSUPP;
6670         }
6671
6672         if (!hdev->fd_en) {
6673                 dev_err(&hdev->pdev->dev,
6674                         "please enable flow director first\n");
6675                 return -EOPNOTSUPP;
6676         }
6677
6678         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6679
6680         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6681         if (ret)
6682                 return ret;
6683
6684         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6685                                          &action, &q_index);
6686         if (ret)
6687                 return ret;
6688
6689         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6690         if (!rule)
6691                 return -ENOMEM;
6692
6693         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6694         if (ret) {
6695                 kfree(rule);
6696                 return ret;
6697         }
6698
6699         rule->flow_type = fs->flow_type;
6700         rule->location = fs->location;
6701         rule->unused_tuple = unused;
6702         rule->vf_id = dst_vport_id;
6703         rule->queue_id = q_index;
6704         rule->action = action;
6705         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6706
6707         ret = hclge_add_fd_entry_common(hdev, rule);
6708         if (ret)
6709                 kfree(rule);
6710
6711         return ret;
6712 }
6713
6714 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6715                               struct ethtool_rxnfc *cmd)
6716 {
6717         struct hclge_vport *vport = hclge_get_vport(handle);
6718         struct hclge_dev *hdev = vport->back;
6719         struct ethtool_rx_flow_spec *fs;
6720         int ret;
6721
6722         if (!hnae3_dev_fd_supported(hdev))
6723                 return -EOPNOTSUPP;
6724
6725         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6726
6727         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6728                 return -EINVAL;
6729
6730         spin_lock_bh(&hdev->fd_rule_lock);
6731         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6732             !test_bit(fs->location, hdev->fd_bmap)) {
6733                 dev_err(&hdev->pdev->dev,
6734                         "Delete failed, rule %u does not exist\n", fs->location);
6735                 spin_unlock_bh(&hdev->fd_rule_lock);
6736                 return -ENOENT;
6737         }
6738
6739         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6740                                    NULL, false);
6741         if (ret)
6742                 goto out;
6743
6744         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6745
6746 out:
6747         spin_unlock_bh(&hdev->fd_rule_lock);
6748         return ret;
6749 }
6750
6751 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6752                                          bool clear_list)
6753 {
6754         struct hclge_fd_rule *rule;
6755         struct hlist_node *node;
6756         u16 location;
6757
6758         if (!hnae3_dev_fd_supported(hdev))
6759                 return;
6760
6761         spin_lock_bh(&hdev->fd_rule_lock);
6762
6763         for_each_set_bit(location, hdev->fd_bmap,
6764                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6765                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6766                                      NULL, false);
6767
6768         if (clear_list) {
6769                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6770                                           rule_node) {
6771                         hlist_del(&rule->rule_node);
6772                         kfree(rule);
6773                 }
6774                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6775                 hdev->hclge_fd_rule_num = 0;
6776                 bitmap_zero(hdev->fd_bmap,
6777                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6778         }
6779
6780         spin_unlock_bh(&hdev->fd_rule_lock);
6781 }
6782
6783 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6784 {
6785         hclge_clear_fd_rules_in_list(hdev, true);
6786         hclge_fd_disable_user_def(hdev);
6787 }
6788
6789 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6790 {
6791         struct hclge_vport *vport = hclge_get_vport(handle);
6792         struct hclge_dev *hdev = vport->back;
6793         struct hclge_fd_rule *rule;
6794         struct hlist_node *node;
6795
6796         /* Return ok here, because reset error handling will check this
6797          * return value. If error is returned here, the reset process will
6798          * fail.
6799          */
6800         if (!hnae3_dev_fd_supported(hdev))
6801                 return 0;
6802
6803         /* if fd is disabled, the rules should not be restored during reset */
6804         if (!hdev->fd_en)
6805                 return 0;
6806
6807         spin_lock_bh(&hdev->fd_rule_lock);
6808         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6809                 if (rule->state == HCLGE_FD_ACTIVE)
6810                         rule->state = HCLGE_FD_TO_ADD;
6811         }
6812         spin_unlock_bh(&hdev->fd_rule_lock);
6813         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6814
6815         return 0;
6816 }
6817
6818 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6819                                  struct ethtool_rxnfc *cmd)
6820 {
6821         struct hclge_vport *vport = hclge_get_vport(handle);
6822         struct hclge_dev *hdev = vport->back;
6823
6824         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6825                 return -EOPNOTSUPP;
6826
6827         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6828         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6829
6830         return 0;
6831 }
6832
6833 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6834                                      struct ethtool_tcpip4_spec *spec,
6835                                      struct ethtool_tcpip4_spec *spec_mask)
6836 {
6837         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6838         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6839                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6840
6841         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6842         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6843                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6844
6845         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6846         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6847                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6848
6849         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6850         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6851                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6852
6853         spec->tos = rule->tuples.ip_tos;
6854         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6855                         0 : rule->tuples_mask.ip_tos;
6856 }
6857
6858 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6859                                   struct ethtool_usrip4_spec *spec,
6860                                   struct ethtool_usrip4_spec *spec_mask)
6861 {
6862         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6863         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6864                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6865
6866         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6867         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6868                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6869
6870         spec->tos = rule->tuples.ip_tos;
6871         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6872                         0 : rule->tuples_mask.ip_tos;
6873
6874         spec->proto = rule->tuples.ip_proto;
6875         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6876                         0 : rule->tuples_mask.ip_proto;
6877
6878         spec->ip_ver = ETH_RX_NFC_IP4;
6879 }
6880
6881 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6882                                      struct ethtool_tcpip6_spec *spec,
6883                                      struct ethtool_tcpip6_spec *spec_mask)
6884 {
6885         cpu_to_be32_array(spec->ip6src,
6886                           rule->tuples.src_ip, IPV6_SIZE);
6887         cpu_to_be32_array(spec->ip6dst,
6888                           rule->tuples.dst_ip, IPV6_SIZE);
6889         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6890                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6891         else
6892                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6893                                   IPV6_SIZE);
6894
6895         if (rule->unused_tuple & BIT(INNER_DST_IP))
6896                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6897         else
6898                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6899                                   IPV6_SIZE);
6900
6901         spec->tclass = rule->tuples.ip_tos;
6902         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6903                         0 : rule->tuples_mask.ip_tos;
6904
6905         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6906         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6907                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6908
6909         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6910         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6911                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6912 }
6913
6914 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6915                                   struct ethtool_usrip6_spec *spec,
6916                                   struct ethtool_usrip6_spec *spec_mask)
6917 {
6918         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6919         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6920         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6921                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6922         else
6923                 cpu_to_be32_array(spec_mask->ip6src,
6924                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6925
6926         if (rule->unused_tuple & BIT(INNER_DST_IP))
6927                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6928         else
6929                 cpu_to_be32_array(spec_mask->ip6dst,
6930                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6931
6932         spec->tclass = rule->tuples.ip_tos;
6933         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6934                         0 : rule->tuples_mask.ip_tos;
6935
6936         spec->l4_proto = rule->tuples.ip_proto;
6937         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6938                         0 : rule->tuples_mask.ip_proto;
6939 }
6940
6941 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6942                                     struct ethhdr *spec,
6943                                     struct ethhdr *spec_mask)
6944 {
6945         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6946         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6947
6948         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6949                 eth_zero_addr(spec_mask->h_source);
6950         else
6951                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6952
6953         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6954                 eth_zero_addr(spec_mask->h_dest);
6955         else
6956                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6957
6958         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6959         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6960                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6961 }
6962
6963 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6964                                        struct hclge_fd_rule *rule)
6965 {
6966         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6967             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6968                 fs->h_ext.data[0] = 0;
6969                 fs->h_ext.data[1] = 0;
6970                 fs->m_ext.data[0] = 0;
6971                 fs->m_ext.data[1] = 0;
6972         } else {
6973                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6974                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6975                 fs->m_ext.data[0] =
6976                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6977                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6978         }
6979 }
6980
6981 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6982                                   struct hclge_fd_rule *rule)
6983 {
6984         if (fs->flow_type & FLOW_EXT) {
6985                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6986                 fs->m_ext.vlan_tci =
6987                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6988                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6989
6990                 hclge_fd_get_user_def_info(fs, rule);
6991         }
6992
6993         if (fs->flow_type & FLOW_MAC_EXT) {
6994                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6995                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6996                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6997                 else
6998                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6999                                         rule->tuples_mask.dst_mac);
7000         }
7001 }
7002
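 /* Serves the ethtool ETHTOOL_GRXCLSRULE request: look up the rule stored at
  * fs->location and copy it back into the ethtool flow spec. From user space
  * this is roughly what `ethtool -u <dev> rule <loc>` reports (illustrative
  * usage, not something enforced by this driver).
  */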
7003 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7004                                   struct ethtool_rxnfc *cmd)
7005 {
7006         struct hclge_vport *vport = hclge_get_vport(handle);
7007         struct hclge_fd_rule *rule = NULL;
7008         struct hclge_dev *hdev = vport->back;
7009         struct ethtool_rx_flow_spec *fs;
7010         struct hlist_node *node2;
7011
7012         if (!hnae3_dev_fd_supported(hdev))
7013                 return -EOPNOTSUPP;
7014
7015         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7016
7017         spin_lock_bh(&hdev->fd_rule_lock);
7018
7019         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7020                 if (rule->location >= fs->location)
7021                         break;
7022         }
7023
7024         if (!rule || fs->location != rule->location) {
7025                 spin_unlock_bh(&hdev->fd_rule_lock);
7026
7027                 return -ENOENT;
7028         }
7029
7030         fs->flow_type = rule->flow_type;
7031         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7032         case SCTP_V4_FLOW:
7033         case TCP_V4_FLOW:
7034         case UDP_V4_FLOW:
7035                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7036                                          &fs->m_u.tcp_ip4_spec);
7037                 break;
7038         case IP_USER_FLOW:
7039                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7040                                       &fs->m_u.usr_ip4_spec);
7041                 break;
7042         case SCTP_V6_FLOW:
7043         case TCP_V6_FLOW:
7044         case UDP_V6_FLOW:
7045                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7046                                          &fs->m_u.tcp_ip6_spec);
7047                 break;
7048         case IPV6_USER_FLOW:
7049                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7050                                       &fs->m_u.usr_ip6_spec);
7051                 break;
7052         /* The flow type of the fd rule has been checked before it was
7053          * added to the rule list. As all other flow types have been
7054          * handled above, the default case must be ETHER_FLOW.
7055          */
7056         default:
7057                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7058                                         &fs->m_u.ether_spec);
7059                 break;
7060         }
7061
7062         hclge_fd_get_ext_info(fs, rule);
7063
7064         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7065                 fs->ring_cookie = RX_CLS_FLOW_DISC;
7066         } else {
7067                 u64 vf_id;
7068
7069                 fs->ring_cookie = rule->queue_id;
7070                 vf_id = rule->vf_id;
7071                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7072                 fs->ring_cookie |= vf_id;
7073         }
7074
7075         spin_unlock_bh(&hdev->fd_rule_lock);
7076
7077         return 0;
7078 }
7079
7080 static int hclge_get_all_rules(struct hnae3_handle *handle,
7081                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
7082 {
7083         struct hclge_vport *vport = hclge_get_vport(handle);
7084         struct hclge_dev *hdev = vport->back;
7085         struct hclge_fd_rule *rule;
7086         struct hlist_node *node2;
7087         int cnt = 0;
7088
7089         if (!hnae3_dev_fd_supported(hdev))
7090                 return -EOPNOTSUPP;
7091
7092         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7093
7094         spin_lock_bh(&hdev->fd_rule_lock);
7095         hlist_for_each_entry_safe(rule, node2,
7096                                   &hdev->fd_rule_list, rule_node) {
7097                 if (cnt == cmd->rule_cnt) {
7098                         spin_unlock_bh(&hdev->fd_rule_lock);
7099                         return -EMSGSIZE;
7100                 }
7101
7102                 if (rule->state == HCLGE_FD_TO_DEL)
7103                         continue;
7104
7105                 rule_locs[cnt] = rule->location;
7106                 cnt++;
7107         }
7108
7109         spin_unlock_bh(&hdev->fd_rule_lock);
7110
7111         cmd->rule_cnt = cnt;
7112
7113         return 0;
7114 }
7115
7116 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7117                                      struct hclge_fd_rule_tuples *tuples)
7118 {
7119 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7120 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7121
7122         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7123         tuples->ip_proto = fkeys->basic.ip_proto;
7124         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7125
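         /* For IPv4 flows only the last u32 of the src_ip/dst_ip arrays is
          * used (index 3, i.e. IPV4_INDEX); IPv6 flows fill all IPV6_SIZE
          * words below.
          */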
7126         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7127                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7128                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7129         } else {
7130                 int i;
7131
7132                 for (i = 0; i < IPV6_SIZE; i++) {
7133                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7134                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7135                 }
7136         }
7137 }
7138
7139 /* traverse all rules, check whether an existing rule has the same tuples */
7140 static struct hclge_fd_rule *
7141 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7142                           const struct hclge_fd_rule_tuples *tuples)
7143 {
7144         struct hclge_fd_rule *rule = NULL;
7145         struct hlist_node *node;
7146
7147         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7148                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7149                         return rule;
7150         }
7151
7152         return NULL;
7153 }
7154
7155 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7156                                      struct hclge_fd_rule *rule)
7157 {
7158         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7159                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7160                              BIT(INNER_SRC_PORT);
7161         rule->action = 0;
7162         rule->vf_id = 0;
7163         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7164         rule->state = HCLGE_FD_TO_ADD;
7165         if (tuples->ether_proto == ETH_P_IP) {
7166                 if (tuples->ip_proto == IPPROTO_TCP)
7167                         rule->flow_type = TCP_V4_FLOW;
7168                 else
7169                         rule->flow_type = UDP_V4_FLOW;
7170         } else {
7171                 if (tuples->ip_proto == IPPROTO_TCP)
7172                         rule->flow_type = TCP_V6_FLOW;
7173                 else
7174                         rule->flow_type = UDP_V6_FLOW;
7175         }
7176         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7177         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7178 }
7179
7180 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7181                                       u16 flow_id, struct flow_keys *fkeys)
7182 {
7183         struct hclge_vport *vport = hclge_get_vport(handle);
7184         struct hclge_fd_rule_tuples new_tuples = {};
7185         struct hclge_dev *hdev = vport->back;
7186         struct hclge_fd_rule *rule;
7187         u16 bit_id;
7188
7189         if (!hnae3_dev_fd_supported(hdev))
7190                 return -EOPNOTSUPP;
7191
7192         /* when there are already fd rules added by the user,
7193          * arfs should not work
7194          */
7195         spin_lock_bh(&hdev->fd_rule_lock);
7196         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7197             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7198                 spin_unlock_bh(&hdev->fd_rule_lock);
7199                 return -EOPNOTSUPP;
7200         }
7201
7202         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7203
7204         /* check whether a flow director filter already exists for this
7205          * flow: if not, create a new filter for it; if a filter exists
7206          * with a different queue id, modify the filter; if a filter
7207          * exists with the same queue id, do nothing
7208          */
7209         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7210         if (!rule) {
7211                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7212                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7213                         spin_unlock_bh(&hdev->fd_rule_lock);
7214                         return -ENOSPC;
7215                 }
7216
7217                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7218                 if (!rule) {
7219                         spin_unlock_bh(&hdev->fd_rule_lock);
7220                         return -ENOMEM;
7221                 }
7222
7223                 rule->location = bit_id;
7224                 rule->arfs.flow_id = flow_id;
7225                 rule->queue_id = queue_id;
7226                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7227                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7228                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7229         } else if (rule->queue_id != queue_id) {
7230                 rule->queue_id = queue_id;
7231                 rule->state = HCLGE_FD_TO_ADD;
7232                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7233                 hclge_task_schedule(hdev, 0);
7234         }
7235         spin_unlock_bh(&hdev->fd_rule_lock);
7236         return rule->location;
7237 }
7238
7239 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7240 {
7241 #ifdef CONFIG_RFS_ACCEL
7242         struct hnae3_handle *handle = &hdev->vport[0].nic;
7243         struct hclge_fd_rule *rule;
7244         struct hlist_node *node;
7245
7246         spin_lock_bh(&hdev->fd_rule_lock);
7247         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7248                 spin_unlock_bh(&hdev->fd_rule_lock);
7249                 return;
7250         }
7251         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7252                 if (rule->state != HCLGE_FD_ACTIVE)
7253                         continue;
7254                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7255                                         rule->arfs.flow_id, rule->location)) {
7256                         rule->state = HCLGE_FD_TO_DEL;
7257                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7258                 }
7259         }
7260         spin_unlock_bh(&hdev->fd_rule_lock);
7261 #endif
7262 }
7263
7264 /* must be called with fd_rule_lock held */
7265 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7266 {
7267 #ifdef CONFIG_RFS_ACCEL
7268         struct hclge_fd_rule *rule;
7269         struct hlist_node *node;
7270         int ret;
7271
7272         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7273                 return 0;
7274
7275         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7276                 switch (rule->state) {
7277                 case HCLGE_FD_TO_DEL:
7278                 case HCLGE_FD_ACTIVE:
7279                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7280                                                    rule->location, NULL, false);
7281                         if (ret)
7282                                 return ret;
7283                         fallthrough;
7284                 case HCLGE_FD_TO_ADD:
7285                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7286                         hlist_del(&rule->rule_node);
7287                         kfree(rule);
7288                         break;
7289                 default:
7290                         break;
7291                 }
7292         }
7293         hclge_sync_fd_state(hdev);
7294
7295 #endif
7296         return 0;
7297 }
7298
7299 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7300                                     struct hclge_fd_rule *rule)
7301 {
7302         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7303                 struct flow_match_basic match;
7304                 u16 ethtype_key, ethtype_mask;
7305
7306                 flow_rule_match_basic(flow, &match);
7307                 ethtype_key = ntohs(match.key->n_proto);
7308                 ethtype_mask = ntohs(match.mask->n_proto);
7309
7310                 if (ethtype_key == ETH_P_ALL) {
7311                         ethtype_key = 0;
7312                         ethtype_mask = 0;
7313                 }
7314                 rule->tuples.ether_proto = ethtype_key;
7315                 rule->tuples_mask.ether_proto = ethtype_mask;
7316                 rule->tuples.ip_proto = match.key->ip_proto;
7317                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7318         } else {
7319                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7320                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7321         }
7322 }
7323
7324 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7325                                   struct hclge_fd_rule *rule)
7326 {
7327         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7328                 struct flow_match_eth_addrs match;
7329
7330                 flow_rule_match_eth_addrs(flow, &match);
7331                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7332                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7333                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7334                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7335         } else {
7336                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7337                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7338         }
7339 }
7340
7341 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7342                                    struct hclge_fd_rule *rule)
7343 {
7344         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7345                 struct flow_match_vlan match;
7346
7347                 flow_rule_match_vlan(flow, &match);
7348                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7349                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7350                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7351                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
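                 /* e.g. with VLAN_PRIO_SHIFT == 13, vlan_id 5 and priority 3
                  * encode as vlan_tag1 == 0x6005
                  */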
7352         } else {
7353                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7354         }
7355 }
7356
7357 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7358                                  struct hclge_fd_rule *rule)
7359 {
7360         u16 addr_type = 0;
7361
7362         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7363                 struct flow_match_control match;
7364
7365                 flow_rule_match_control(flow, &match);
7366                 addr_type = match.key->addr_type;
7367         }
7368
7369         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7370                 struct flow_match_ipv4_addrs match;
7371
7372                 flow_rule_match_ipv4_addrs(flow, &match);
7373                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7374                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7375                                                 be32_to_cpu(match.mask->src);
7376                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7377                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7378                                                 be32_to_cpu(match.mask->dst);
7379         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7380                 struct flow_match_ipv6_addrs match;
7381
7382                 flow_rule_match_ipv6_addrs(flow, &match);
7383                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7384                                   IPV6_SIZE);
7385                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7386                                   match.mask->src.s6_addr32, IPV6_SIZE);
7387                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7388                                   IPV6_SIZE);
7389                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7390                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7391         } else {
7392                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7393                 rule->unused_tuple |= BIT(INNER_DST_IP);
7394         }
7395 }
7396
7397 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7398                                    struct hclge_fd_rule *rule)
7399 {
7400         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7401                 struct flow_match_ports match;
7402
7403                 flow_rule_match_ports(flow, &match);
7404
7405                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7406                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7407                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7408                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7409         } else {
7410                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7411                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7412         }
7413 }
7414
7415 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7416                                   struct flow_cls_offload *cls_flower,
7417                                   struct hclge_fd_rule *rule)
7418 {
7419         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7420         struct flow_dissector *dissector = flow->match.dissector;
7421
7422         if (dissector->used_keys &
7423             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7424               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7425               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7426               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7427               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7428               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7429               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7430                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7431                         dissector->used_keys);
7432                 return -EOPNOTSUPP;
7433         }
7434
7435         hclge_get_cls_key_basic(flow, rule);
7436         hclge_get_cls_key_mac(flow, rule);
7437         hclge_get_cls_key_vlan(flow, rule);
7438         hclge_get_cls_key_ip(flow, rule);
7439         hclge_get_cls_key_port(flow, rule);
7440
7441         return 0;
7442 }
7443
7444 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7445                                   struct flow_cls_offload *cls_flower, int tc)
7446 {
7447         u32 prio = cls_flower->common.prio;
7448
7449         if (tc < 0 || tc > hdev->tc_max) {
7450                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7451                 return -EINVAL;
7452         }
7453
7454         if (prio == 0 ||
7455             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7456                 dev_err(&hdev->pdev->dev,
7457                         "prio %u should be in range[1, %u]\n",
7458                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7459                 return -EINVAL;
7460         }
7461
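         /* tc flower rules use (prio - 1) as the rule location (see
          * hclge_add_cls_flower()), so a set bit here means that location
          * is already occupied
          */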
7462         if (test_bit(prio - 1, hdev->fd_bmap)) {
7463                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7464                 return -EINVAL;
7465         }
7466         return 0;
7467 }
7468
7469 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7470                                 struct flow_cls_offload *cls_flower,
7471                                 int tc)
7472 {
7473         struct hclge_vport *vport = hclge_get_vport(handle);
7474         struct hclge_dev *hdev = vport->back;
7475         struct hclge_fd_rule *rule;
7476         int ret;
7477
7478         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7479         if (ret) {
7480                 dev_err(&hdev->pdev->dev,
7481                         "failed to check cls flower params, ret = %d\n", ret);
7482                 return ret;
7483         }
7484
7485         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7486         if (!rule)
7487                 return -ENOMEM;
7488
7489         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7490         if (ret) {
7491                 kfree(rule);
7492                 return ret;
7493         }
7494
7495         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7496         rule->cls_flower.tc = tc;
7497         rule->location = cls_flower->common.prio - 1;
7498         rule->vf_id = 0;
7499         rule->cls_flower.cookie = cls_flower->cookie;
7500         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7501
7502         ret = hclge_add_fd_entry_common(hdev, rule);
7503         if (ret)
7504                 kfree(rule);
7505
7506         return ret;
7507 }
7508
7509 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7510                                                    unsigned long cookie)
7511 {
7512         struct hclge_fd_rule *rule;
7513         struct hlist_node *node;
7514
7515         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7516                 if (rule->cls_flower.cookie == cookie)
7517                         return rule;
7518         }
7519
7520         return NULL;
7521 }
7522
7523 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7524                                 struct flow_cls_offload *cls_flower)
7525 {
7526         struct hclge_vport *vport = hclge_get_vport(handle);
7527         struct hclge_dev *hdev = vport->back;
7528         struct hclge_fd_rule *rule;
7529         int ret;
7530
7531         spin_lock_bh(&hdev->fd_rule_lock);
7532
7533         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7534         if (!rule) {
7535                 spin_unlock_bh(&hdev->fd_rule_lock);
7536                 return -EINVAL;
7537         }
7538
7539         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7540                                    NULL, false);
7541         if (ret) {
7542                 spin_unlock_bh(&hdev->fd_rule_lock);
7543                 return ret;
7544         }
7545
7546         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7547         spin_unlock_bh(&hdev->fd_rule_lock);
7548
7549         return 0;
7550 }
7551
7552 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7553 {
7554         struct hclge_fd_rule *rule;
7555         struct hlist_node *node;
7556         int ret = 0;
7557
7558         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7559                 return;
7560
7561         spin_lock_bh(&hdev->fd_rule_lock);
7562
7563         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7564                 switch (rule->state) {
7565                 case HCLGE_FD_TO_ADD:
7566                         ret = hclge_fd_config_rule(hdev, rule);
7567                         if (ret)
7568                                 goto out;
7569                         rule->state = HCLGE_FD_ACTIVE;
7570                         break;
7571                 case HCLGE_FD_TO_DEL:
7572                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7573                                                    rule->location, NULL, false);
7574                         if (ret)
7575                                 goto out;
7576                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7577                         hclge_fd_free_node(hdev, rule);
7578                         break;
7579                 default:
7580                         break;
7581                 }
7582         }
7583
7584 out:
7585         if (ret)
7586                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7587
7588         spin_unlock_bh(&hdev->fd_rule_lock);
7589 }
7590
7591 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7592 {
7593         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7594                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7595
7596                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7597         }
7598
7599         hclge_sync_fd_user_def_cfg(hdev, false);
7600
7601         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7602 }
7603
7604 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7605 {
7606         struct hclge_vport *vport = hclge_get_vport(handle);
7607         struct hclge_dev *hdev = vport->back;
7608
7609         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7610                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7611 }
7612
7613 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7614 {
7615         struct hclge_vport *vport = hclge_get_vport(handle);
7616         struct hclge_dev *hdev = vport->back;
7617
7618         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7619 }
7620
7621 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7622 {
7623         struct hclge_vport *vport = hclge_get_vport(handle);
7624         struct hclge_dev *hdev = vport->back;
7625
7626         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7627 }
7628
7629 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7630 {
7631         struct hclge_vport *vport = hclge_get_vport(handle);
7632         struct hclge_dev *hdev = vport->back;
7633
7634         return hdev->rst_stats.hw_reset_done_cnt;
7635 }
7636
7637 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7638 {
7639         struct hclge_vport *vport = hclge_get_vport(handle);
7640         struct hclge_dev *hdev = vport->back;
7641
7642         hdev->fd_en = enable;
7643
7644         if (!enable)
7645                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7646         else
7647                 hclge_restore_fd_entries(handle);
7648
7649         hclge_task_schedule(hdev, 0);
7650 }
7651
7652 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7653 {
7654         struct hclge_desc desc;
7655         struct hclge_config_mac_mode_cmd *req =
7656                 (struct hclge_config_mac_mode_cmd *)desc.data;
7657         u32 loop_en = 0;
7658         int ret;
7659
7660         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7661
7662         if (enable) {
7663                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7664                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7665                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7666                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7667                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7668                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7669                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7670                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7671                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7672                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7673         }
7674
7675         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7676
7677         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7678         if (ret)
7679                 dev_err(&hdev->pdev->dev,
7680                         "mac enable fail, ret =%d.\n", ret);
7681 }
7682
7683 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7684                                      u8 switch_param, u8 param_mask)
7685 {
7686         struct hclge_mac_vlan_switch_cmd *req;
7687         struct hclge_desc desc;
7688         u32 func_id;
7689         int ret;
7690
7691         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7692         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7693
7694         /* read current config parameter */
7695         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7696                                    true);
7697         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7698         req->func_id = cpu_to_le32(func_id);
7699
7700         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7701         if (ret) {
7702                 dev_err(&hdev->pdev->dev,
7703                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7704                 return ret;
7705         }
7706
7707         /* modify and write new config parameter */
7708         hclge_cmd_reuse_desc(&desc, false);
7709         req->switch_param = (req->switch_param & param_mask) | switch_param;
7710         req->param_mask = param_mask;
7711
7712         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7713         if (ret)
7714                 dev_err(&hdev->pdev->dev,
7715                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7716         return ret;
7717 }
7718
7719 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7720                                        int link_ret)
7721 {
7722 #define HCLGE_PHY_LINK_STATUS_NUM  200
7723
7724         struct phy_device *phydev = hdev->hw.mac.phydev;
7725         int i = 0;
7726         int ret;
7727
7728         do {
7729                 ret = phy_read_status(phydev);
7730                 if (ret) {
7731                         dev_err(&hdev->pdev->dev,
7732                                 "phy update link status fail, ret = %d\n", ret);
7733                         return;
7734                 }
7735
7736                 if (phydev->link == link_ret)
7737                         break;
7738
7739                 msleep(HCLGE_LINK_STATUS_MS);
7740         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7741 }
7742
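 /* Poll the MAC link status every HCLGE_LINK_STATUS_MS until it matches
  * link_ret, giving up after HCLGE_MAC_LINK_STATUS_NUM tries (roughly one
  * second in total).
  */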
7743 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7744 {
7745 #define HCLGE_MAC_LINK_STATUS_NUM  100
7746
7747         int link_status;
7748         int i = 0;
7749         int ret;
7750
7751         do {
7752                 ret = hclge_get_mac_link_status(hdev, &link_status);
7753                 if (ret)
7754                         return ret;
7755                 if (link_status == link_ret)
7756                         return 0;
7757
7758                 msleep(HCLGE_LINK_STATUS_MS);
7759         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7760         return -EBUSY;
7761 }
7762
7763 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7764                                           bool is_phy)
7765 {
7766         int link_ret;
7767
7768         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7769
7770         if (is_phy)
7771                 hclge_phy_link_status_wait(hdev, link_ret);
7772
7773         return hclge_mac_link_status_wait(hdev, link_ret);
7774 }
7775
7776 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7777 {
7778         struct hclge_config_mac_mode_cmd *req;
7779         struct hclge_desc desc;
7780         u32 loop_en;
7781         int ret;
7782
7783         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7784         /* 1 Read out the MAC mode config first */
7785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7786         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7787         if (ret) {
7788                 dev_err(&hdev->pdev->dev,
7789                         "mac loopback get fail, ret =%d.\n", ret);
7790                 return ret;
7791         }
7792
7793         /* 2 Then setup the loopback flag */
7794         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7795         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7796
7797         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7798
7799         /* 3 Config mac work mode with loopback flag
7800          * and its original configuration parameters
7801          */
7802         hclge_cmd_reuse_desc(&desc, false);
7803         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7804         if (ret)
7805                 dev_err(&hdev->pdev->dev,
7806                         "mac loopback set fail, ret =%d.\n", ret);
7807         return ret;
7808 }
7809
7810 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7811                                      enum hnae3_loop loop_mode)
7812 {
7813 #define HCLGE_COMMON_LB_RETRY_MS        10
7814 #define HCLGE_COMMON_LB_RETRY_NUM       100
7815
7816         struct hclge_common_lb_cmd *req;
7817         struct hclge_desc desc;
7818         int ret, i = 0;
7819         u8 loop_mode_b;
7820
7821         req = (struct hclge_common_lb_cmd *)desc.data;
7822         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7823
7824         switch (loop_mode) {
7825         case HNAE3_LOOP_SERIAL_SERDES:
7826                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7827                 break;
7828         case HNAE3_LOOP_PARALLEL_SERDES:
7829                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7830                 break;
7831         case HNAE3_LOOP_PHY:
7832                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7833                 break;
7834         default:
7835                 dev_err(&hdev->pdev->dev,
7836                         "unsupported common loopback mode %d\n", loop_mode);
7837                 return -ENOTSUPP;
7838         }
7839
7840         if (en) {
7841                 req->enable = loop_mode_b;
7842                 req->mask = loop_mode_b;
7843         } else {
7844                 req->mask = loop_mode_b;
7845         }
7846
7847         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7848         if (ret) {
7849                 dev_err(&hdev->pdev->dev,
7850                         "common loopback set fail, ret = %d\n", ret);
7851                 return ret;
7852         }
7853
7854         do {
7855                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7856                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7857                                            true);
7858                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7859                 if (ret) {
7860                         dev_err(&hdev->pdev->dev,
7861                                 "common loopback get, ret = %d\n", ret);
7862                         return ret;
7863                 }
7864         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7865                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7866
7867         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7868                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7869                 return -EBUSY;
7870         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7871                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7872                 return -EIO;
7873         }
7874         return ret;
7875 }
7876
7877 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7878                                      enum hnae3_loop loop_mode)
7879 {
7880         int ret;
7881
7882         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7883         if (ret)
7884                 return ret;
7885
7886         hclge_cfg_mac_mode(hdev, en);
7887
7888         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7889         if (ret)
7890                 dev_err(&hdev->pdev->dev,
7891                         "serdes loopback config mac mode timeout\n");
7892
7893         return ret;
7894 }
7895
7896 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7897                                      struct phy_device *phydev)
7898 {
7899         int ret;
7900
7901         if (!phydev->suspended) {
7902                 ret = phy_suspend(phydev);
7903                 if (ret)
7904                         return ret;
7905         }
7906
7907         ret = phy_resume(phydev);
7908         if (ret)
7909                 return ret;
7910
7911         return phy_loopback(phydev, true);
7912 }
7913
7914 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7915                                       struct phy_device *phydev)
7916 {
7917         int ret;
7918
7919         ret = phy_loopback(phydev, false);
7920         if (ret)
7921                 return ret;
7922
7923         return phy_suspend(phydev);
7924 }
7925
7926 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7927 {
7928         struct phy_device *phydev = hdev->hw.mac.phydev;
7929         int ret;
7930
7931         if (!phydev) {
7932                 if (hnae3_dev_phy_imp_supported(hdev))
7933                         return hclge_set_common_loopback(hdev, en,
7934                                                          HNAE3_LOOP_PHY);
7935                 return -ENOTSUPP;
7936         }
7937
7938         if (en)
7939                 ret = hclge_enable_phy_loopback(hdev, phydev);
7940         else
7941                 ret = hclge_disable_phy_loopback(hdev, phydev);
7942         if (ret) {
7943                 dev_err(&hdev->pdev->dev,
7944                         "set phy loopback fail, ret = %d\n", ret);
7945                 return ret;
7946         }
7947
7948         hclge_cfg_mac_mode(hdev, en);
7949
7950         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7951         if (ret)
7952                 dev_err(&hdev->pdev->dev,
7953                         "phy loopback config mac mode timeout\n");
7954
7955         return ret;
7956 }
7957
7958 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7959                                      u16 stream_id, bool enable)
7960 {
7961         struct hclge_desc desc;
7962         struct hclge_cfg_com_tqp_queue_cmd *req =
7963                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7964
7965         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7966         req->tqp_id = cpu_to_le16(tqp_id);
7967         req->stream_id = cpu_to_le16(stream_id);
7968         if (enable)
7969                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7970
7971         return hclge_cmd_send(&hdev->hw, &desc, 1);
7972 }
7973
7974 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7975 {
7976         struct hclge_vport *vport = hclge_get_vport(handle);
7977         struct hclge_dev *hdev = vport->back;
7978         int ret;
7979         u16 i;
7980
7981         for (i = 0; i < handle->kinfo.num_tqps; i++) {
7982                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7983                 if (ret)
7984                         return ret;
7985         }
7986         return 0;
7987 }
7988
7989 static int hclge_set_loopback(struct hnae3_handle *handle,
7990                               enum hnae3_loop loop_mode, bool en)
7991 {
7992         struct hclge_vport *vport = hclge_get_vport(handle);
7993         struct hclge_dev *hdev = vport->back;
7994         int ret;
7995
7996         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7997          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7998          * the same, the packets are looped back in the SSU. If SSU loopback
7999          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8000          */
8001         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8002                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8003
8004                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8005                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
8006                 if (ret)
8007                         return ret;
8008         }
8009
8010         switch (loop_mode) {
8011         case HNAE3_LOOP_APP:
8012                 ret = hclge_set_app_loopback(hdev, en);
8013                 break;
8014         case HNAE3_LOOP_SERIAL_SERDES:
8015         case HNAE3_LOOP_PARALLEL_SERDES:
8016                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8017                 break;
8018         case HNAE3_LOOP_PHY:
8019                 ret = hclge_set_phy_loopback(hdev, en);
8020                 break;
8021         default:
8022                 ret = -ENOTSUPP;
8023                 dev_err(&hdev->pdev->dev,
8024                         "loop_mode %d is not supported\n", loop_mode);
8025                 break;
8026         }
8027
8028         if (ret)
8029                 return ret;
8030
8031         ret = hclge_tqp_enable(handle, en);
8032         if (ret)
8033                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8034                         en ? "enable" : "disable", ret);
8035
8036         return ret;
8037 }
8038
8039 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8040 {
8041         int ret;
8042
8043         ret = hclge_set_app_loopback(hdev, false);
8044         if (ret)
8045                 return ret;
8046
8047         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8048         if (ret)
8049                 return ret;
8050
8051         return hclge_cfg_common_loopback(hdev, false,
8052                                          HNAE3_LOOP_PARALLEL_SERDES);
8053 }
8054
8055 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8056 {
8057         struct hclge_vport *vport = hclge_get_vport(handle);
8058         struct hnae3_knic_private_info *kinfo;
8059         struct hnae3_queue *queue;
8060         struct hclge_tqp *tqp;
8061         int i;
8062
8063         kinfo = &vport->nic.kinfo;
8064         for (i = 0; i < kinfo->num_tqps; i++) {
8065                 queue = handle->kinfo.tqp[i];
8066                 tqp = container_of(queue, struct hclge_tqp, q);
8067                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8068         }
8069 }
8070
8071 static void hclge_flush_link_update(struct hclge_dev *hdev)
8072 {
8073 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
8074
8075         unsigned long last = hdev->serv_processed_cnt;
8076         int i = 0;
8077
8078         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8079                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8080                last == hdev->serv_processed_cnt)
8081                 usleep_range(1, 1);
8082 }
8083
8084 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8085 {
8086         struct hclge_vport *vport = hclge_get_vport(handle);
8087         struct hclge_dev *hdev = vport->back;
8088
8089         if (enable) {
8090                 hclge_task_schedule(hdev, 0);
8091         } else {
8092                 /* Set the DOWN flag here to disable link updating */
8093                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8094
8095                 /* flush memory to make sure DOWN is seen by service task */
8096                 smp_mb__before_atomic();
8097                 hclge_flush_link_update(hdev);
8098         }
8099 }
8100
8101 static int hclge_ae_start(struct hnae3_handle *handle)
8102 {
8103         struct hclge_vport *vport = hclge_get_vport(handle);
8104         struct hclge_dev *hdev = vport->back;
8105
8106         /* mac enable */
8107         hclge_cfg_mac_mode(hdev, true);
8108         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8109         hdev->hw.mac.link = 0;
8110
8111         /* reset tqp stats */
8112         hclge_reset_tqp_stats(handle);
8113
8114         hclge_mac_start_phy(hdev);
8115
8116         return 0;
8117 }
8118
8119 static void hclge_ae_stop(struct hnae3_handle *handle)
8120 {
8121         struct hclge_vport *vport = hclge_get_vport(handle);
8122         struct hclge_dev *hdev = vport->back;
8123
8124         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8125         spin_lock_bh(&hdev->fd_rule_lock);
8126         hclge_clear_arfs_rules(hdev);
8127         spin_unlock_bh(&hdev->fd_rule_lock);
8128
8129         /* If it is not a PF reset, the firmware will disable the MAC,
8130          * so we only need to stop the phy here.
8131          */
8132         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8133             hdev->reset_type != HNAE3_FUNC_RESET) {
8134                 hclge_mac_stop_phy(hdev);
8135                 hclge_update_link_status(hdev);
8136                 return;
8137         }
8138
8139         hclge_reset_tqp(handle);
8140
8141         hclge_config_mac_tnl_int(hdev, false);
8142
8143         /* Mac disable */
8144         hclge_cfg_mac_mode(hdev, false);
8145
8146         hclge_mac_stop_phy(hdev);
8147
8148         /* reset tqp stats */
8149         hclge_reset_tqp_stats(handle);
8150         hclge_update_link_status(hdev);
8151 }
8152
8153 int hclge_vport_start(struct hclge_vport *vport)
8154 {
8155         struct hclge_dev *hdev = vport->back;
8156
8157         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8158         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8159         vport->last_active_jiffies = jiffies;
8160
8161         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8162                 if (vport->vport_id) {
8163                         hclge_restore_mac_table_common(vport);
8164                         hclge_restore_vport_vlan_table(vport);
8165                 } else {
8166                         hclge_restore_hw_table(hdev);
8167                 }
8168         }
8169
8170         clear_bit(vport->vport_id, hdev->vport_config_block);
8171
8172         return 0;
8173 }
8174
8175 void hclge_vport_stop(struct hclge_vport *vport)
8176 {
8177         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8178 }
8179
8180 static int hclge_client_start(struct hnae3_handle *handle)
8181 {
8182         struct hclge_vport *vport = hclge_get_vport(handle);
8183
8184         return hclge_vport_start(vport);
8185 }
8186
8187 static void hclge_client_stop(struct hnae3_handle *handle)
8188 {
8189         struct hclge_vport *vport = hclge_get_vport(handle);
8190
8191         hclge_vport_stop(vport);
8192 }
8193
8194 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8195                                          u16 cmdq_resp, u8  resp_code,
8196                                          enum hclge_mac_vlan_tbl_opcode op)
8197 {
8198         struct hclge_dev *hdev = vport->back;
8199
8200         if (cmdq_resp) {
8201                 dev_err(&hdev->pdev->dev,
8202                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8203                         cmdq_resp);
8204                 return -EIO;
8205         }
8206
8207         if (op == HCLGE_MAC_VLAN_ADD) {
8208                 if (!resp_code || resp_code == 1)
8209                         return 0;
8210                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8211                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8212                         return -ENOSPC;
8213
8214                 dev_err(&hdev->pdev->dev,
8215                         "add mac addr failed for undefined, code=%u.\n",
8216                         resp_code);
8217                 return -EIO;
8218         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8219                 if (!resp_code) {
8220                         return 0;
8221                 } else if (resp_code == 1) {
8222                         dev_dbg(&hdev->pdev->dev,
8223                                 "remove mac addr failed for miss.\n");
8224                         return -ENOENT;
8225                 }
8226
8227                 dev_err(&hdev->pdev->dev,
8228                         "remove mac addr failed for undefined, code=%u.\n",
8229                         resp_code);
8230                 return -EIO;
8231         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8232                 if (!resp_code) {
8233                         return 0;
8234                 } else if (resp_code == 1) {
8235                         dev_dbg(&hdev->pdev->dev,
8236                                 "lookup mac addr failed for miss.\n");
8237                         return -ENOENT;
8238                 }
8239
8240                 dev_err(&hdev->pdev->dev,
8241                         "lookup mac addr failed for undefined, code=%u.\n",
8242                         resp_code);
8243                 return -EIO;
8244         }
8245
8246         dev_err(&hdev->pdev->dev,
8247                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8248
8249         return -EINVAL;
8250 }
8251
8252 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8253 {
8254 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8255
8256         unsigned int word_num;
8257         unsigned int bit_num;
8258
8259         if (vfid > 255 || vfid < 0)
8260                 return -EIO;
8261
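         /* Function ids 0..191 live in desc[1] and the remainder in desc[2];
          * e.g. vfid 40 maps to desc[1] word 1 bit 8, while vfid 200 maps to
          * desc[2] word 0 bit 8.
          */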
8262         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8263                 word_num = vfid / 32;
8264                 bit_num  = vfid % 32;
8265                 if (clr)
8266                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8267                 else
8268                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8269         } else {
8270                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8271                 bit_num  = vfid % 32;
8272                 if (clr)
8273                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8274                 else
8275                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8276         }
8277
8278         return 0;
8279 }
8280
8281 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8282 {
8283 #define HCLGE_DESC_NUMBER 3
8284 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8285         int i, j;
8286
8287         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8288                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8289                         if (desc[i].data[j])
8290                                 return false;
8291
8292         return true;
8293 }
8294
8295 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8296                                    const u8 *addr, bool is_mc)
8297 {
8298         const unsigned char *mac_addr = addr;
8299         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8300                        (mac_addr[0]) | (mac_addr[1] << 8);
8301         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
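         /* e.g. MAC 00:11:22:33:44:55 packs to high_val 0x33221100 and
          * low_val 0x5544 (bytes stored little-endian within each field)
          */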
8302
8303         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8304         if (is_mc) {
8305                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8306                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8307         }
8308
8309         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8310         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8311 }
8312
8313 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8314                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8315 {
8316         struct hclge_dev *hdev = vport->back;
8317         struct hclge_desc desc;
8318         u8 resp_code;
8319         u16 retval;
8320         int ret;
8321
8322         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8323
8324         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8325
8326         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8327         if (ret) {
8328                 dev_err(&hdev->pdev->dev,
8329                         "del mac addr failed for cmd_send, ret =%d.\n",
8330                         ret);
8331                 return ret;
8332         }
8333         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8334         retval = le16_to_cpu(desc.retval);
8335
8336         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8337                                              HCLGE_MAC_VLAN_REMOVE);
8338 }
8339
8340 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8341                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8342                                      struct hclge_desc *desc,
8343                                      bool is_mc)
8344 {
8345         struct hclge_dev *hdev = vport->back;
8346         u8 resp_code;
8347         u16 retval;
8348         int ret;
8349
8350         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8351         if (is_mc) {
8352                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8353                 memcpy(desc[0].data,
8354                        req,
8355                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8356                 hclge_cmd_setup_basic_desc(&desc[1],
8357                                            HCLGE_OPC_MAC_VLAN_ADD,
8358                                            true);
8359                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8360                 hclge_cmd_setup_basic_desc(&desc[2],
8361                                            HCLGE_OPC_MAC_VLAN_ADD,
8362                                            true);
8363                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8364         } else {
8365                 memcpy(desc[0].data,
8366                        req,
8367                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8368                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8369         }
8370         if (ret) {
8371                 dev_err(&hdev->pdev->dev,
8372                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8373                         ret);
8374                 return ret;
8375         }
8376         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8377         retval = le16_to_cpu(desc[0].retval);
8378
8379         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8380                                              HCLGE_MAC_VLAN_LKUP);
8381 }
8382
8383 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8384                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8385                                   struct hclge_desc *mc_desc)
8386 {
8387         struct hclge_dev *hdev = vport->back;
8388         int cfg_status;
8389         u8 resp_code;
8390         u16 retval;
8391         int ret;
8392
8393         if (!mc_desc) {
8394                 struct hclge_desc desc;
8395
8396                 hclge_cmd_setup_basic_desc(&desc,
8397                                            HCLGE_OPC_MAC_VLAN_ADD,
8398                                            false);
8399                 memcpy(desc.data, req,
8400                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8401                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8402                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8403                 retval = le16_to_cpu(desc.retval);
8404
8405                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8406                                                            resp_code,
8407                                                            HCLGE_MAC_VLAN_ADD);
8408         } else {
8409                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8410                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8411                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8412                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8413                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8414                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8415                 memcpy(mc_desc[0].data, req,
8416                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8417                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8418                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8419                 retval = le16_to_cpu(mc_desc[0].retval);
8420
8421                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8422                                                            resp_code,
8423                                                            HCLGE_MAC_VLAN_ADD);
8424         }
8425
8426         if (ret) {
8427                 dev_err(&hdev->pdev->dev,
8428                         "add mac addr failed for cmd_send, ret =%d.\n",
8429                         ret);
8430                 return ret;
8431         }
8432
8433         return cfg_status;
8434 }
8435
8436 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8437                                u16 *allocated_size)
8438 {
8439         struct hclge_umv_spc_alc_cmd *req;
8440         struct hclge_desc desc;
8441         int ret;
8442
8443         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8444         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8445
8446         req->space_size = cpu_to_le32(space_size);
8447
8448         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8449         if (ret) {
8450                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8451                         ret);
8452                 return ret;
8453         }
8454
8455         *allocated_size = le32_to_cpu(desc.data[1]);
8456
8457         return 0;
8458 }
8459
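/* Request UMV (unicast mac vlan) table space from firmware and split it
 * into num_alloc_vport + 1 equal quotas: each vport keeps one as its
 * private quota (priv_umv_size), and the extra quota plus the division
 * remainder forms the shared pool (share_umv_size).
 */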
8460 static int hclge_init_umv_space(struct hclge_dev *hdev)
8461 {
8462         u16 allocated_size = 0;
8463         int ret;
8464
8465         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8466         if (ret)
8467                 return ret;
8468
8469         if (allocated_size < hdev->wanted_umv_size)
8470                 dev_warn(&hdev->pdev->dev,
8471                          "failed to alloc umv space, want %u, get %u\n",
8472                          hdev->wanted_umv_size, allocated_size);
8473
8474         hdev->max_umv_size = allocated_size;
8475         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8476         hdev->share_umv_size = hdev->priv_umv_size +
8477                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8478
8479         return 0;
8480 }
8481
8482 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8483 {
8484         struct hclge_vport *vport;
8485         int i;
8486
8487         for (i = 0; i < hdev->num_alloc_vport; i++) {
8488                 vport = &hdev->vport[i];
8489                 vport->used_umv_num = 0;
8490         }
8491
8492         mutex_lock(&hdev->vport_lock);
8493         hdev->share_umv_size = hdev->priv_umv_size +
8494                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8495         mutex_unlock(&hdev->vport_lock);
8496 }
8497
8498 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8499 {
8500         struct hclge_dev *hdev = vport->back;
8501         bool is_full;
8502
8503         if (need_lock)
8504                 mutex_lock(&hdev->vport_lock);
8505
8506         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8507                    hdev->share_umv_size == 0);
8508
8509         if (need_lock)
8510                 mutex_unlock(&hdev->vport_lock);
8511
8512         return is_full;
8513 }
8514
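/* Adjust UMV usage counters when a unicast MAC entry is added (@is_free
 * false) or freed (@is_free true); entries beyond the vport's private
 * quota are charged against the shared pool.
 */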
8515 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8516 {
8517         struct hclge_dev *hdev = vport->back;
8518
8519         if (is_free) {
8520                 if (vport->used_umv_num > hdev->priv_umv_size)
8521                         hdev->share_umv_size++;
8522
8523                 if (vport->used_umv_num > 0)
8524                         vport->used_umv_num--;
8525         } else {
8526                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8527                     hdev->share_umv_size > 0)
8528                         hdev->share_umv_size--;
8529                 vport->used_umv_num++;
8530         }
8531 }
8532
8533 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8534                                                   const u8 *mac_addr)
8535 {
8536         struct hclge_mac_node *mac_node, *tmp;
8537
8538         list_for_each_entry_safe(mac_node, tmp, list, node)
8539                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8540                         return mac_node;
8541
8542         return NULL;
8543 }
8544
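/* Apply a newly requested state to an existing mac node, implementing the
 * TO_ADD/TO_DEL/ACTIVE transitions of the vport mac lists.
 */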
8545 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8546                                   enum HCLGE_MAC_NODE_STATE state)
8547 {
8548         switch (state) {
8549         /* from set_rx_mode or tmp_add_list */
8550         case HCLGE_MAC_TO_ADD:
8551                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8552                         mac_node->state = HCLGE_MAC_ACTIVE;
8553                 break;
8554         /* only from set_rx_mode */
8555         case HCLGE_MAC_TO_DEL:
8556                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8557                         list_del(&mac_node->node);
8558                         kfree(mac_node);
8559                 } else {
8560                         mac_node->state = HCLGE_MAC_TO_DEL;
8561                 }
8562                 break;
8563         /* only from tmp_add_list, the mac_node->state won't be
8564          * ACTIVE.
8565          */
8566         case HCLGE_MAC_ACTIVE:
8567                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8568                         mac_node->state = HCLGE_MAC_ACTIVE;
8569
8570                 break;
8571         }
8572 }
8573
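/* Queue an add or delete request for @addr in the vport's unicast or
 * multicast mac list. The hardware table itself is updated later by the
 * mac table sync task (see hclge_sync_mac_table()).
 */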
8574 int hclge_update_mac_list(struct hclge_vport *vport,
8575                           enum HCLGE_MAC_NODE_STATE state,
8576                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8577                           const unsigned char *addr)
8578 {
8579         struct hclge_dev *hdev = vport->back;
8580         struct hclge_mac_node *mac_node;
8581         struct list_head *list;
8582
8583         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8584                 &vport->uc_mac_list : &vport->mc_mac_list;
8585
8586         spin_lock_bh(&vport->mac_list_lock);
8587
8588         /* if the mac addr is already in the mac list, there is no need to
8589          * add a new node; just update the existing node's state: convert it
8590          * to a new state, remove it, or do nothing.
8591          */
8592         mac_node = hclge_find_mac_node(list, addr);
8593         if (mac_node) {
8594                 hclge_update_mac_node(mac_node, state);
8595                 spin_unlock_bh(&vport->mac_list_lock);
8596                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8597                 return 0;
8598         }
8599
8600                 /* if this address was never added, there is nothing to delete */
8601         if (state == HCLGE_MAC_TO_DEL) {
8602                 spin_unlock_bh(&vport->mac_list_lock);
8603                 dev_err(&hdev->pdev->dev,
8604                         "failed to delete address %pM from mac list\n",
8605                         addr);
8606                 return -ENOENT;
8607         }
8608
8609         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8610         if (!mac_node) {
8611                 spin_unlock_bh(&vport->mac_list_lock);
8612                 return -ENOMEM;
8613         }
8614
8615         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8616
8617         mac_node->state = state;
8618         ether_addr_copy(mac_node->mac_addr, addr);
8619         list_add_tail(&mac_node->node, list);
8620
8621         spin_unlock_bh(&vport->mac_list_lock);
8622
8623         return 0;
8624 }
8625
8626 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8627                              const unsigned char *addr)
8628 {
8629         struct hclge_vport *vport = hclge_get_vport(handle);
8630
8631         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8632                                      addr);
8633 }
8634
8635 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8636                              const unsigned char *addr)
8637 {
8638         struct hclge_dev *hdev = vport->back;
8639         struct hclge_mac_vlan_tbl_entry_cmd req;
8640         struct hclge_desc desc;
8641         u16 egress_port = 0;
8642         int ret;
8643
8644         /* mac addr check */
8645         if (is_zero_ether_addr(addr) ||
8646             is_broadcast_ether_addr(addr) ||
8647             is_multicast_ether_addr(addr)) {
8648                 dev_err(&hdev->pdev->dev,
8649                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8650                          addr, is_zero_ether_addr(addr),
8651                          is_broadcast_ether_addr(addr),
8652                          is_multicast_ether_addr(addr));
8653                 return -EINVAL;
8654         }
8655
8656         memset(&req, 0, sizeof(req));
8657
8658         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8659                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8660
8661         req.egress_port = cpu_to_le16(egress_port);
8662
8663         hclge_prepare_mac_addr(&req, addr, false);
8664
8665         /* Look up the mac address in the mac_vlan table, and add
8666          * it if the entry does not exist. Duplicate unicast entries
8667          * are not allowed in the mac vlan table.
8668          */
8669         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8670         if (ret == -ENOENT) {
8671                 mutex_lock(&hdev->vport_lock);
8672                 if (!hclge_is_umv_space_full(vport, false)) {
8673                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8674                         if (!ret)
8675                                 hclge_update_umv_space(vport, false);
8676                         mutex_unlock(&hdev->vport_lock);
8677                         return ret;
8678                 }
8679                 mutex_unlock(&hdev->vport_lock);
8680
8681                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8682                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8683                                 hdev->priv_umv_size);
8684
8685                 return -ENOSPC;
8686         }
8687
8688         /* check if we just hit the duplicate */
8689         if (!ret) {
8690                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8691                          vport->vport_id, addr);
8692                 return 0;
8693         }
8694
8695         dev_err(&hdev->pdev->dev,
8696                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8697                 addr);
8698
8699         return ret;
8700 }
8701
8702 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8703                             const unsigned char *addr)
8704 {
8705         struct hclge_vport *vport = hclge_get_vport(handle);
8706
8707         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8708                                      addr);
8709 }
8710
8711 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8712                             const unsigned char *addr)
8713 {
8714         struct hclge_dev *hdev = vport->back;
8715         struct hclge_mac_vlan_tbl_entry_cmd req;
8716         int ret;
8717
8718         /* mac addr check */
8719         if (is_zero_ether_addr(addr) ||
8720             is_broadcast_ether_addr(addr) ||
8721             is_multicast_ether_addr(addr)) {
8722                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8723                         addr);
8724                 return -EINVAL;
8725         }
8726
8727         memset(&req, 0, sizeof(req));
8728         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8729         hclge_prepare_mac_addr(&req, addr, false);
8730         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8731         if (!ret) {
8732                 mutex_lock(&hdev->vport_lock);
8733                 hclge_update_umv_space(vport, true);
8734                 mutex_unlock(&hdev->vport_lock);
8735         } else if (ret == -ENOENT) {
8736                 ret = 0;
8737         }
8738
8739         return ret;
8740 }
8741
8742 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8743                              const unsigned char *addr)
8744 {
8745         struct hclge_vport *vport = hclge_get_vport(handle);
8746
8747         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8748                                      addr);
8749 }
8750
8751 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8752                              const unsigned char *addr)
8753 {
8754         struct hclge_dev *hdev = vport->back;
8755         struct hclge_mac_vlan_tbl_entry_cmd req;
8756         struct hclge_desc desc[3];
8757         int status;
8758
8759         /* mac addr check */
8760         if (!is_multicast_ether_addr(addr)) {
8761                 dev_err(&hdev->pdev->dev,
8762                         "Add mc mac err! invalid mac:%pM.\n",
8763                          addr);
8764                 return -EINVAL;
8765         }
8766         memset(&req, 0, sizeof(req));
8767         hclge_prepare_mac_addr(&req, addr, true);
8768         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8769         if (status) {
8770                 /* This mac addr does not exist, add a new entry for it */
8771                 memset(desc[0].data, 0, sizeof(desc[0].data));
8772                 memset(desc[1].data, 0, sizeof(desc[0].data));
8773                 memset(desc[2].data, 0, sizeof(desc[0].data));
8774         }
8775         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8776         if (status)
8777                 return status;
8778         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8779         /* if already overflow, not to print each time */
8780         if (status == -ENOSPC &&
8781             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8782                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8783
8784         return status;
8785 }
8786
8787 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8788                             const unsigned char *addr)
8789 {
8790         struct hclge_vport *vport = hclge_get_vport(handle);
8791
8792         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8793                                      addr);
8794 }
8795
8796 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8797                             const unsigned char *addr)
8798 {
8799         struct hclge_dev *hdev = vport->back;
8800         struct hclge_mac_vlan_tbl_entry_cmd req;
8801         enum hclge_cmd_status status;
8802         struct hclge_desc desc[3];
8803
8804         /* mac addr check */
8805         if (!is_multicast_ether_addr(addr)) {
8806                 dev_dbg(&hdev->pdev->dev,
8807                         "Remove mc mac err! invalid mac:%pM.\n",
8808                          addr);
8809                 return -EINVAL;
8810         }
8811
8812         memset(&req, 0, sizeof(req));
8813         hclge_prepare_mac_addr(&req, addr, true);
8814         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8815         if (!status) {
8816                 /* This mac addr exists, remove this handle's VFID for it */
8817                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8818                 if (status)
8819                         return status;
8820
8821                 if (hclge_is_all_function_id_zero(desc))
8822                         /* All the vfids are zero, so delete this entry */
8823                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8824                 else
8825                         /* Not all the vfids are zero, just update the vfid bitmap */
8826                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8827         } else if (status == -ENOENT) {
8828                 status = 0;
8829         }
8830
8831         return status;
8832 }
8833
8834 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8835                                       struct list_head *list,
8836                                       int (*sync)(struct hclge_vport *,
8837                                                   const unsigned char *))
8838 {
8839         struct hclge_mac_node *mac_node, *tmp;
8840         int ret;
8841
8842         list_for_each_entry_safe(mac_node, tmp, list, node) {
8843                 ret = sync(vport, mac_node->mac_addr);
8844                 if (!ret) {
8845                         mac_node->state = HCLGE_MAC_ACTIVE;
8846                 } else {
8847                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8848                                 &vport->state);
8849                         break;
8850                 }
8851         }
8852 }
8853
8854 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8855                                         struct list_head *list,
8856                                         int (*unsync)(struct hclge_vport *,
8857                                                       const unsigned char *))
8858 {
8859         struct hclge_mac_node *mac_node, *tmp;
8860         int ret;
8861
8862         list_for_each_entry_safe(mac_node, tmp, list, node) {
8863                 ret = unsync(vport, mac_node->mac_addr);
8864                 if (!ret || ret == -ENOENT) {
8865                         list_del(&mac_node->node);
8866                         kfree(mac_node);
8867                 } else {
8868                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8869                                 &vport->state);
8870                         break;
8871                 }
8872         }
8873 }
8874
8875 static bool hclge_sync_from_add_list(struct list_head *add_list,
8876                                      struct list_head *mac_list)
8877 {
8878         struct hclge_mac_node *mac_node, *tmp, *new_node;
8879         bool all_added = true;
8880
8881         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8882                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8883                         all_added = false;
8884
8885                 /* if the mac address from tmp_add_list is not in the
8886                  * uc/mc_mac_list, a TO_DEL request was received while the
8887                  * address was being added into the mac table. If the
8888                  * mac_node state is ACTIVE, change it to TO_DEL so it is
8889                  * removed next time. Otherwise it must be TO_ADD, meaning
8890                  * the address has not been added into the mac table yet,
8891                  * so just remove the mac node.
8892                  */
8893                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8894                 if (new_node) {
8895                         hclge_update_mac_node(new_node, mac_node->state);
8896                         list_del(&mac_node->node);
8897                         kfree(mac_node);
8898                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8899                         mac_node->state = HCLGE_MAC_TO_DEL;
8900                         list_move_tail(&mac_node->node, mac_list);
8901                 } else {
8902                         list_del(&mac_node->node);
8903                         kfree(mac_node);
8904                 }
8905         }
8906
8907         return all_added;
8908 }
8909
8910 static void hclge_sync_from_del_list(struct list_head *del_list,
8911                                      struct list_head *mac_list)
8912 {
8913         struct hclge_mac_node *mac_node, *tmp, *new_node;
8914
8915         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8916                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8917                 if (new_node) {
8918                         /* If the mac addr exists in the mac list, a new
8919                          * TO_ADD request was received while the mac address
8920                          * was being configured. The mac node state is
8921                          * TO_ADD and the address is still in the hardware
8922                          * (because the delete failed), so we just need to
8923                          * change the mac node state to ACTIVE.
8924                          */
8925                         new_node->state = HCLGE_MAC_ACTIVE;
8926                         list_del(&mac_node->node);
8927                         kfree(mac_node);
8928                 } else {
8929                         list_move_tail(&mac_node->node, mac_list);
8930                 }
8931         }
8932 }
8933
8934 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8935                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8936                                         bool is_all_added)
8937 {
8938         if (mac_type == HCLGE_MAC_ADDR_UC) {
8939                 if (is_all_added)
8940                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8941                 else
8942                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8943         } else {
8944                 if (is_all_added)
8945                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8946                 else
8947                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8948         }
8949 }
8950
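/* Flush pending mac list changes for one address type to hardware: collect
 * TO_DEL and TO_ADD nodes under the list lock, apply deletes first to free
 * table space, then adds, and merge the results back so that any failed
 * entries are retried on the next sync.
 */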
8951 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8952                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8953 {
8954         struct hclge_mac_node *mac_node, *tmp, *new_node;
8955         struct list_head tmp_add_list, tmp_del_list;
8956         struct list_head *list;
8957         bool all_added;
8958
8959         INIT_LIST_HEAD(&tmp_add_list);
8960         INIT_LIST_HEAD(&tmp_del_list);
8961
8962         /* move the mac addrs to the tmp_add_list and tmp_del_list, then
8963          * we can add/delete these mac addrs outside the spin lock
8964          */
8965         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8966                 &vport->uc_mac_list : &vport->mc_mac_list;
8967
8968         spin_lock_bh(&vport->mac_list_lock);
8969
8970         list_for_each_entry_safe(mac_node, tmp, list, node) {
8971                 switch (mac_node->state) {
8972                 case HCLGE_MAC_TO_DEL:
8973                         list_move_tail(&mac_node->node, &tmp_del_list);
8974                         break;
8975                 case HCLGE_MAC_TO_ADD:
8976                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8977                         if (!new_node)
8978                                 goto stop_traverse;
8979                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8980                         new_node->state = mac_node->state;
8981                         list_add_tail(&new_node->node, &tmp_add_list);
8982                         break;
8983                 default:
8984                         break;
8985                 }
8986         }
8987
8988 stop_traverse:
8989         spin_unlock_bh(&vport->mac_list_lock);
8990
8991         /* delete first, in order to get max mac table space for adding */
8992         if (mac_type == HCLGE_MAC_ADDR_UC) {
8993                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8994                                             hclge_rm_uc_addr_common);
8995                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8996                                           hclge_add_uc_addr_common);
8997         } else {
8998                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8999                                             hclge_rm_mc_addr_common);
9000                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9001                                           hclge_add_mc_addr_common);
9002         }
9003
9004         /* if adding/deleting some mac addresses failed, move them back to
9005          * the mac_list and retry on the next sync.
9006          */
9007         spin_lock_bh(&vport->mac_list_lock);
9008
9009         hclge_sync_from_del_list(&tmp_del_list, list);
9010         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9011
9012         spin_unlock_bh(&vport->mac_list_lock);
9013
9014         hclge_update_overflow_flags(vport, mac_type, all_added);
9015 }
9016
9017 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9018 {
9019         struct hclge_dev *hdev = vport->back;
9020
9021         if (test_bit(vport->vport_id, hdev->vport_config_block))
9022                 return false;
9023
9024         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9025                 return true;
9026
9027         return false;
9028 }
9029
9030 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9031 {
9032         int i;
9033
9034         for (i = 0; i < hdev->num_alloc_vport; i++) {
9035                 struct hclge_vport *vport = &hdev->vport[i];
9036
9037                 if (!hclge_need_sync_mac_table(vport))
9038                         continue;
9039
9040                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9041                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9042         }
9043 }
9044
9045 static void hclge_build_del_list(struct list_head *list,
9046                                  bool is_del_list,
9047                                  struct list_head *tmp_del_list)
9048 {
9049         struct hclge_mac_node *mac_cfg, *tmp;
9050
9051         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9052                 switch (mac_cfg->state) {
9053                 case HCLGE_MAC_TO_DEL:
9054                 case HCLGE_MAC_ACTIVE:
9055                         list_move_tail(&mac_cfg->node, tmp_del_list);
9056                         break;
9057                 case HCLGE_MAC_TO_ADD:
9058                         if (is_del_list) {
9059                                 list_del(&mac_cfg->node);
9060                                 kfree(mac_cfg);
9061                         }
9062                         break;
9063                 }
9064         }
9065 }
9066
9067 static void hclge_unsync_del_list(struct hclge_vport *vport,
9068                                   int (*unsync)(struct hclge_vport *vport,
9069                                                 const unsigned char *addr),
9070                                   bool is_del_list,
9071                                   struct list_head *tmp_del_list)
9072 {
9073         struct hclge_mac_node *mac_cfg, *tmp;
9074         int ret;
9075
9076         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9077                 ret = unsync(vport, mac_cfg->mac_addr);
9078                 if (!ret || ret == -ENOENT) {
9079                         /* clear all mac addrs from hardware, but keep them
9080                          * in the mac list so they can be restored after the
9081                          * vf reset has finished.
9082                          */
9083                         if (!is_del_list &&
9084                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
9085                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
9086                         } else {
9087                                 list_del(&mac_cfg->node);
9088                                 kfree(mac_cfg);
9089                         }
9090                 } else if (is_del_list) {
9091                         mac_cfg->state = HCLGE_MAC_TO_DEL;
9092                 }
9093         }
9094 }
9095
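/* Remove all of the vport's unicast or multicast addresses from hardware.
 * When @is_del_list is false, active entries are kept in the software list
 * in TO_ADD state so they can be restored after a VF reset.
 */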
9096 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9097                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9098 {
9099         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9100         struct hclge_dev *hdev = vport->back;
9101         struct list_head tmp_del_list, *list;
9102
9103         if (mac_type == HCLGE_MAC_ADDR_UC) {
9104                 list = &vport->uc_mac_list;
9105                 unsync = hclge_rm_uc_addr_common;
9106         } else {
9107                 list = &vport->mc_mac_list;
9108                 unsync = hclge_rm_mc_addr_common;
9109         }
9110
9111         INIT_LIST_HEAD(&tmp_del_list);
9112
9113         if (!is_del_list)
9114                 set_bit(vport->vport_id, hdev->vport_config_block);
9115
9116         spin_lock_bh(&vport->mac_list_lock);
9117
9118         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9119
9120         spin_unlock_bh(&vport->mac_list_lock);
9121
9122         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9123
9124         spin_lock_bh(&vport->mac_list_lock);
9125
9126         hclge_sync_from_del_list(&tmp_del_list, list);
9127
9128         spin_unlock_bh(&vport->mac_list_lock);
9129 }
9130
9131 /* remove all mac addresses when uninitializing */
9132 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9133                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9134 {
9135         struct hclge_mac_node *mac_node, *tmp;
9136         struct hclge_dev *hdev = vport->back;
9137         struct list_head tmp_del_list, *list;
9138
9139         INIT_LIST_HEAD(&tmp_del_list);
9140
9141         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9142                 &vport->uc_mac_list : &vport->mc_mac_list;
9143
9144         spin_lock_bh(&vport->mac_list_lock);
9145
9146         list_for_each_entry_safe(mac_node, tmp, list, node) {
9147                 switch (mac_node->state) {
9148                 case HCLGE_MAC_TO_DEL:
9149                 case HCLGE_MAC_ACTIVE:
9150                         list_move_tail(&mac_node->node, &tmp_del_list);
9151                         break;
9152                 case HCLGE_MAC_TO_ADD:
9153                         list_del(&mac_node->node);
9154                         kfree(mac_node);
9155                         break;
9156                 }
9157         }
9158
9159         spin_unlock_bh(&vport->mac_list_lock);
9160
9161         if (mac_type == HCLGE_MAC_ADDR_UC)
9162                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9163                                             hclge_rm_uc_addr_common);
9164         else
9165                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9166                                             hclge_rm_mc_addr_common);
9167
9168         if (!list_empty(&tmp_del_list))
9169                 dev_warn(&hdev->pdev->dev,
9170                          "failed to completely uninit %s mac list for vport %u\n",
9171                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9172                          vport->vport_id);
9173
9174         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9175                 list_del(&mac_node->node);
9176                 kfree(mac_node);
9177         }
9178 }
9179
9180 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9181 {
9182         struct hclge_vport *vport;
9183         int i;
9184
9185         for (i = 0; i < hdev->num_alloc_vport; i++) {
9186                 vport = &hdev->vport[i];
9187                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9188                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9189         }
9190 }
9191
9192 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9193                                               u16 cmdq_resp, u8 resp_code)
9194 {
9195 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9196 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9197 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9198 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9199
9200         int return_status;
9201
9202         if (cmdq_resp) {
9203                 dev_err(&hdev->pdev->dev,
9204                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9205                         cmdq_resp);
9206                 return -EIO;
9207         }
9208
9209         switch (resp_code) {
9210         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9211         case HCLGE_ETHERTYPE_ALREADY_ADD:
9212                 return_status = 0;
9213                 break;
9214         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9215                 dev_err(&hdev->pdev->dev,
9216                         "add mac ethertype failed for manager table overflow.\n");
9217                 return_status = -EIO;
9218                 break;
9219         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9220                 dev_err(&hdev->pdev->dev,
9221                         "add mac ethertype failed for key conflict.\n");
9222                 return_status = -EIO;
9223                 break;
9224         default:
9225                 dev_err(&hdev->pdev->dev,
9226                         "add mac ethertype failed for undefined, code=%u.\n",
9227                         resp_code);
9228                 return_status = -EIO;
9229         }
9230
9231         return return_status;
9232 }
9233
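/* Return true if @mac_addr is already in use: either present in the
 * hardware mac_vlan table or configured as another VF's mac in vf_info.
 */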
9234 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9235                                      u8 *mac_addr)
9236 {
9237         struct hclge_mac_vlan_tbl_entry_cmd req;
9238         struct hclge_dev *hdev = vport->back;
9239         struct hclge_desc desc;
9240         u16 egress_port = 0;
9241         int i;
9242
9243         if (is_zero_ether_addr(mac_addr))
9244                 return false;
9245
9246         memset(&req, 0, sizeof(req));
9247         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9248                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9249         req.egress_port = cpu_to_le16(egress_port);
9250         hclge_prepare_mac_addr(&req, mac_addr, false);
9251
9252         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9253                 return true;
9254
9255         vf_idx += HCLGE_VF_VPORT_START_NUM;
9256         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9257                 if (i != vf_idx &&
9258                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9259                         return true;
9260
9261         return false;
9262 }
9263
9264 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9265                             u8 *mac_addr)
9266 {
9267         struct hclge_vport *vport = hclge_get_vport(handle);
9268         struct hclge_dev *hdev = vport->back;
9269
9270         vport = hclge_get_vf_vport(hdev, vf);
9271         if (!vport)
9272                 return -EINVAL;
9273
9274         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9275                 dev_info(&hdev->pdev->dev,
9276                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9277                          mac_addr);
9278                 return 0;
9279         }
9280
9281         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9282                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9283                         mac_addr);
9284                 return -EEXIST;
9285         }
9286
9287         ether_addr_copy(vport->vf_info.mac, mac_addr);
9288
9289         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9290                 dev_info(&hdev->pdev->dev,
9291                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9292                          vf, mac_addr);
9293                 return hclge_inform_reset_assert_to_vf(vport);
9294         }
9295
9296         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9297                  vf, mac_addr);
9298         return 0;
9299 }
9300
9301 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9302                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9303 {
9304         struct hclge_desc desc;
9305         u8 resp_code;
9306         u16 retval;
9307         int ret;
9308
9309         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9310         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9311
9312         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9313         if (ret) {
9314                 dev_err(&hdev->pdev->dev,
9315                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9316                         ret);
9317                 return ret;
9318         }
9319
9320         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9321         retval = le16_to_cpu(desc.retval);
9322
9323         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9324 }
9325
9326 static int init_mgr_tbl(struct hclge_dev *hdev)
9327 {
9328         int ret;
9329         int i;
9330
9331         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9332                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9333                 if (ret) {
9334                         dev_err(&hdev->pdev->dev,
9335                                 "add mac ethertype failed, ret =%d.\n",
9336                                 ret);
9337                         return ret;
9338                 }
9339         }
9340
9341         return 0;
9342 }
9343
9344 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9345 {
9346         struct hclge_vport *vport = hclge_get_vport(handle);
9347         struct hclge_dev *hdev = vport->back;
9348
9349         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9350 }
9351
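/* Switch the device MAC address in the vport's unicast mac list: queue
 * @new_addr for addition at the list head (so it is added first and cannot
 * be squeezed out by the UMV space limit) and mark @old_addr for deletion
 * when it differs from @new_addr.
 */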
9352 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9353                                        const u8 *old_addr, const u8 *new_addr)
9354 {
9355         struct list_head *list = &vport->uc_mac_list;
9356         struct hclge_mac_node *old_node, *new_node;
9357
9358         new_node = hclge_find_mac_node(list, new_addr);
9359         if (!new_node) {
9360                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9361                 if (!new_node)
9362                         return -ENOMEM;
9363
9364                 new_node->state = HCLGE_MAC_TO_ADD;
9365                 ether_addr_copy(new_node->mac_addr, new_addr);
9366                 list_add(&new_node->node, list);
9367         } else {
9368                 if (new_node->state == HCLGE_MAC_TO_DEL)
9369                         new_node->state = HCLGE_MAC_ACTIVE;
9370
9371                 /* make sure the new addr is at the list head, so the dev
9372                  * addr is not left out of the mac table due to the umv space
9373                  * limitation after a global/imp reset, which clears the mac
9374                  * table in hardware.
9375                  */
9376                 list_move(&new_node->node, list);
9377         }
9378
9379         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9380                 old_node = hclge_find_mac_node(list, old_addr);
9381                 if (old_node) {
9382                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9383                                 list_del(&old_node->node);
9384                                 kfree(old_node);
9385                         } else {
9386                                 old_node->state = HCLGE_MAC_TO_DEL;
9387                         }
9388                 }
9389         }
9390
9391         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9392
9393         return 0;
9394 }
9395
9396 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9397                               bool is_first)
9398 {
9399         const unsigned char *new_addr = (const unsigned char *)p;
9400         struct hclge_vport *vport = hclge_get_vport(handle);
9401         struct hclge_dev *hdev = vport->back;
9402         unsigned char *old_addr = NULL;
9403         int ret;
9404
9405         /* mac addr check */
9406         if (is_zero_ether_addr(new_addr) ||
9407             is_broadcast_ether_addr(new_addr) ||
9408             is_multicast_ether_addr(new_addr)) {
9409                 dev_err(&hdev->pdev->dev,
9410                         "change uc mac err! invalid mac: %pM.\n",
9411                          new_addr);
9412                 return -EINVAL;
9413         }
9414
9415         ret = hclge_pause_addr_cfg(hdev, new_addr);
9416         if (ret) {
9417                 dev_err(&hdev->pdev->dev,
9418                         "failed to configure mac pause address, ret = %d\n",
9419                         ret);
9420                 return ret;
9421         }
9422
9423         if (!is_first)
9424                 old_addr = hdev->hw.mac.mac_addr;
9425
9426         spin_lock_bh(&vport->mac_list_lock);
9427         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9428         if (ret) {
9429                 dev_err(&hdev->pdev->dev,
9430                         "failed to change the mac addr:%pM, ret = %d\n",
9431                         new_addr, ret);
9432                 spin_unlock_bh(&vport->mac_list_lock);
9433
9434                 if (!is_first)
9435                         hclge_pause_addr_cfg(hdev, old_addr);
9436
9437                 return ret;
9438         }
9439         /* we must update the dev addr under the spin lock to prevent it
9440          * from being removed by the set_rx_mode path.
9441          */
9442         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9443         spin_unlock_bh(&vport->mac_list_lock);
9444
9445         hclge_task_schedule(hdev, 0);
9446
9447         return 0;
9448 }
9449
9450 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9451 {
9452         struct mii_ioctl_data *data = if_mii(ifr);
9453
9454         if (!hnae3_dev_phy_imp_supported(hdev))
9455                 return -EOPNOTSUPP;
9456
9457         switch (cmd) {
9458         case SIOCGMIIPHY:
9459                 data->phy_id = hdev->hw.mac.phy_addr;
9460                 /* this command reads phy id and register at the same time */
9461                 fallthrough;
9462         case SIOCGMIIREG:
9463                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9464                 return 0;
9465
9466         case SIOCSMIIREG:
9467                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9468         default:
9469                 return -EOPNOTSUPP;
9470         }
9471 }
9472
9473 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9474                           int cmd)
9475 {
9476         struct hclge_vport *vport = hclge_get_vport(handle);
9477         struct hclge_dev *hdev = vport->back;
9478
9479         switch (cmd) {
9480         case SIOCGHWTSTAMP:
9481                 return hclge_ptp_get_cfg(hdev, ifr);
9482         case SIOCSHWTSTAMP:
9483                 return hclge_ptp_set_cfg(hdev, ifr);
9484         default:
9485                 if (!hdev->hw.mac.phydev)
9486                         return hclge_mii_ioctl(hdev, ifr, cmd);
9487         }
9488
9489         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9490 }
9491
9492 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9493                                              bool bypass_en)
9494 {
9495         struct hclge_port_vlan_filter_bypass_cmd *req;
9496         struct hclge_desc desc;
9497         int ret;
9498
9499         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9500         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9501         req->vf_id = vf_id;
9502         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9503                       bypass_en ? 1 : 0);
9504
9505         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9506         if (ret)
9507                 dev_err(&hdev->pdev->dev,
9508                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9509                         vf_id, ret);
9510
9511         return ret;
9512 }
9513
9514 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9515                                       u8 fe_type, bool filter_en, u8 vf_id)
9516 {
9517         struct hclge_vlan_filter_ctrl_cmd *req;
9518         struct hclge_desc desc;
9519         int ret;
9520
9521         /* read current vlan filter parameter */
9522         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9523         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9524         req->vlan_type = vlan_type;
9525         req->vf_id = vf_id;
9526
9527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9528         if (ret) {
9529                 dev_err(&hdev->pdev->dev,
9530                         "failed to get vlan filter config, ret = %d.\n", ret);
9531                 return ret;
9532         }
9533
9534         /* modify and write new config parameter */
9535         hclge_cmd_reuse_desc(&desc, false);
9536         req->vlan_fe = filter_en ?
9537                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9538
9539         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9540         if (ret)
9541                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9542                         ret);
9543
9544         return ret;
9545 }
9546
9547 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9548 {
9549         struct hclge_dev *hdev = vport->back;
9550         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9551         int ret;
9552
9553         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9554                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9555                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9556                                                   enable, vport->vport_id);
9557
9558         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9559                                          HCLGE_FILTER_FE_EGRESS, enable,
9560                                          vport->vport_id);
9561         if (ret)
9562                 return ret;
9563
9564         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9565                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9566                                                         !enable);
9567         } else if (!vport->vport_id) {
9568                 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9569                         enable = false;
9570
9571                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9572                                                  HCLGE_FILTER_FE_INGRESS,
9573                                                  enable, 0);
9574         }
9575
9576         return ret;
9577 }
9578
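/* Decide whether vlan filtering should actually be enabled for this vport,
 * based on the port based vlan state, trusted/promiscuous settings, the
 * requested filter state and, on devices supporting vlan filter
 * modification, whether any non-zero vlan is in the vlan list.
 */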
9579 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9580 {
9581         struct hnae3_handle *handle = &vport->nic;
9582         struct hclge_vport_vlan_cfg *vlan, *tmp;
9583         struct hclge_dev *hdev = vport->back;
9584
9585         if (vport->vport_id) {
9586                 if (vport->port_base_vlan_cfg.state !=
9587                         HNAE3_PORT_BASE_VLAN_DISABLE)
9588                         return true;
9589
9590                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9591                         return false;
9592         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9593                 return false;
9594         }
9595
9596         if (!vport->req_vlan_fltr_en)
9597                 return false;
9598
9599         /* for compatibility with older devices, always enable the vlan filter */
9600         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9601                 return true;
9602
9603         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9604                 if (vlan->vlan_id != 0)
9605                         return true;
9606
9607         return false;
9608 }
9609
9610 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9611 {
9612         struct hclge_dev *hdev = vport->back;
9613         bool need_en;
9614         int ret;
9615
9616         mutex_lock(&hdev->vport_lock);
9617
9618         vport->req_vlan_fltr_en = request_en;
9619
9620         need_en = hclge_need_enable_vport_vlan_filter(vport);
9621         if (need_en == vport->cur_vlan_fltr_en) {
9622                 mutex_unlock(&hdev->vport_lock);
9623                 return 0;
9624         }
9625
9626         ret = hclge_set_vport_vlan_filter(vport, need_en);
9627         if (ret) {
9628                 mutex_unlock(&hdev->vport_lock);
9629                 return ret;
9630         }
9631
9632         vport->cur_vlan_fltr_en = need_en;
9633
9634         mutex_unlock(&hdev->vport_lock);
9635
9636         return 0;
9637 }
9638
9639 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9640 {
9641         struct hclge_vport *vport = hclge_get_vport(handle);
9642
9643         return hclge_enable_vport_vlan_filter(vport, enable);
9644 }
9645
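/* Build the two VLAN_FILTER_VF_CFG descriptors used to add or kill @vlan
 * for one VF, setting the bit for @vfid in the VF bitmap that spans both
 * descriptors.
 */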
9646 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9647                                         bool is_kill, u16 vlan,
9648                                         struct hclge_desc *desc)
9649 {
9650         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9651         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9652         u8 vf_byte_val;
9653         u8 vf_byte_off;
9654         int ret;
9655
9656         hclge_cmd_setup_basic_desc(&desc[0],
9657                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9658         hclge_cmd_setup_basic_desc(&desc[1],
9659                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9660
9661         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9662
9663         vf_byte_off = vfid / 8;
9664         vf_byte_val = 1 << (vfid % 8);
9665
9666         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9667         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9668
9669         req0->vlan_id  = cpu_to_le16(vlan);
9670         req0->vlan_cfg = is_kill;
9671
9672         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9673                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9674         else
9675                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9676
9677         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9678         if (ret) {
9679                 dev_err(&hdev->pdev->dev,
9680                         "Send vf vlan command fail, ret =%d.\n",
9681                         ret);
9682                 return ret;
9683         }
9684
9685         return 0;
9686 }
9687
9688 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9689                                           bool is_kill, struct hclge_desc *desc)
9690 {
9691         struct hclge_vlan_filter_vf_cfg_cmd *req;
9692
9693         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9694
9695         if (!is_kill) {
9696 #define HCLGE_VF_VLAN_NO_ENTRY  2
9697                 if (!req->resp_code || req->resp_code == 1)
9698                         return 0;
9699
9700                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9701                         set_bit(vfid, hdev->vf_vlan_full);
9702                         dev_warn(&hdev->pdev->dev,
9703                                  "vf vlan table is full, vf vlan filter is disabled\n");
9704                         return 0;
9705                 }
9706
9707                 dev_err(&hdev->pdev->dev,
9708                         "Add vf vlan filter fail, ret =%u.\n",
9709                         req->resp_code);
9710         } else {
9711 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9712                 if (!req->resp_code)
9713                         return 0;
9714
9715                 /* vf vlan filter is disabled when the vf vlan table is full,
9716                  * so new vlan ids are not added into the vf vlan table.
9717                  * Just return 0 without a warning, to avoid massive verbose
9718                  * logs when unloading.
9719                  */
9720                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9721                         return 0;
9722
9723                 dev_err(&hdev->pdev->dev,
9724                         "Kill vf vlan filter fail, ret =%u.\n",
9725                         req->resp_code);
9726         }
9727
9728         return -EIO;
9729 }
9730
9731 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9732                                     bool is_kill, u16 vlan)
9733 {
9734         struct hclge_vport *vport = &hdev->vport[vfid];
9735         struct hclge_desc desc[2];
9736         int ret;
9737
9738         /* If the vf vlan table is full, firmware will disable the vf vlan
9739          * filter, so it is neither possible nor necessary to add a new vlan
9740          * id. If spoof check is enabled and the table is full, no new vlan
9741          * should be added, because tx packets with these vlan ids are dropped.
9742          */
9743         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9744                 if (vport->vf_info.spoofchk && vlan) {
9745                         dev_err(&hdev->pdev->dev,
9746                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9747                         return -EPERM;
9748                 }
9749                 return 0;
9750         }
9751
9752         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9753         if (ret)
9754                 return ret;
9755
9756         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9757 }
9758
9759 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9760                                       u16 vlan_id, bool is_kill)
9761 {
9762         struct hclge_vlan_filter_pf_cfg_cmd *req;
9763         struct hclge_desc desc;
9764         u8 vlan_offset_byte_val;
9765         u8 vlan_offset_byte;
9766         u8 vlan_offset_160;
9767         int ret;
9768
9769         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9770
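        /* The PF vlan filter command addresses vlans in blocks of
         * HCLGE_VLAN_ID_OFFSET_STEP ids: vlan_offset selects the block and
         * a single bit in vlan_offset_bitmap selects the vlan within it.
         */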
9771         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9772         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9773                            HCLGE_VLAN_BYTE_SIZE;
9774         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9775
9776         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9777         req->vlan_offset = vlan_offset_160;
9778         req->vlan_cfg = is_kill;
9779         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9780
9781         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9782         if (ret)
9783                 dev_err(&hdev->pdev->dev,
9784                         "port vlan command, send fail, ret =%d.\n", ret);
9785         return ret;
9786 }
9787
9788 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9789                                     u16 vport_id, u16 vlan_id,
9790                                     bool is_kill)
9791 {
9792         u16 vport_idx, vport_num = 0;
9793         int ret;
9794
9795         if (is_kill && !vlan_id)
9796                 return 0;
9797
9798         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9799         if (ret) {
9800                 dev_err(&hdev->pdev->dev,
9801                         "Set %u vport vlan filter config fail, ret =%d.\n",
9802                         vport_id, ret);
9803                 return ret;
9804         }
9805
9806         /* vlan 0 may be added twice when 8021q module is enabled */
9807         if (!is_kill && !vlan_id &&
9808             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9809                 return 0;
9810
9811         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9812                 dev_err(&hdev->pdev->dev,
9813                         "Add port vlan failed, vport %u is already in vlan %u\n",
9814                         vport_id, vlan_id);
9815                 return -EINVAL;
9816         }
9817
9818         if (is_kill &&
9819             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9820                 dev_err(&hdev->pdev->dev,
9821                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9822                         vport_id, vlan_id);
9823                 return -EINVAL;
9824         }
9825
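        /* The port vlan filter only needs updating when the first vport
         * joins this vlan or the last vport leaves it.
         */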
9826         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9827                 vport_num++;
9828
9829         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9830                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9831                                                  is_kill);
9832
9833         return ret;
9834 }
9835
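/* Program the per-vport TX vlan tag handling (accept/insert/default tag)
 * from vport->txvlan_cfg into hardware via HCLGE_OPC_VLAN_PORT_TX_CFG.
 */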
9836 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9837 {
9838         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9839         struct hclge_vport_vtag_tx_cfg_cmd *req;
9840         struct hclge_dev *hdev = vport->back;
9841         struct hclge_desc desc;
9842         u16 bmap_index;
9843         int status;
9844
9845         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9846
9847         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9848         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9849         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9850         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9851                       vcfg->accept_tag1 ? 1 : 0);
9852         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9853                       vcfg->accept_untag1 ? 1 : 0);
9854         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9855                       vcfg->accept_tag2 ? 1 : 0);
9856         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9857                       vcfg->accept_untag2 ? 1 : 0);
9858         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9859                       vcfg->insert_tag1_en ? 1 : 0);
9860         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9861                       vcfg->insert_tag2_en ? 1 : 0);
9862         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9863                       vcfg->tag_shift_mode_en ? 1 : 0);
9864         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9865
9866         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9867         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9868                         HCLGE_VF_NUM_PER_BYTE;
9869         req->vf_bitmap[bmap_index] =
9870                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9871
9872         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9873         if (status)
9874                 dev_err(&hdev->pdev->dev,
9875                         "Send port txvlan cfg command fail, ret =%d\n",
9876                         status);
9877
9878         return status;
9879 }
9880
9881 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9882 {
9883         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9884         struct hclge_vport_vtag_rx_cfg_cmd *req;
9885         struct hclge_dev *hdev = vport->back;
9886         struct hclge_desc desc;
9887         u16 bmap_index;
9888         int status;
9889
9890         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9891
9892         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9893         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9894                       vcfg->strip_tag1_en ? 1 : 0);
9895         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9896                       vcfg->strip_tag2_en ? 1 : 0);
9897         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9898                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9899         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9900                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9901         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9902                       vcfg->strip_tag1_discard_en ? 1 : 0);
9903         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9904                       vcfg->strip_tag2_discard_en ? 1 : 0);
9905
9906         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9907         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9908                         HCLGE_VF_NUM_PER_BYTE;
9909         req->vf_bitmap[bmap_index] =
9910                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9911
9912         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9913         if (status)
9914                 dev_err(&hdev->pdev->dev,
9915                         "Send port rxvlan cfg command fail, ret =%d\n",
9916                         status);
9917
9918         return status;
9919 }
9920
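/* Derive the TX tag insertion and RX tag stripping configuration for a
 * vport from its port base vlan state and apply both to hardware. With a
 * port base vlan enabled, tag1 is inserted on TX with the configured qos
 * and vlan_tag.
 */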
9921 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9922                                   u16 port_base_vlan_state,
9923                                   u16 vlan_tag, u8 qos)
9924 {
9925         int ret;
9926
9927         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9928                 vport->txvlan_cfg.accept_tag1 = true;
9929                 vport->txvlan_cfg.insert_tag1_en = false;
9930                 vport->txvlan_cfg.default_tag1 = 0;
9931         } else {
9932                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9933
9934                 vport->txvlan_cfg.accept_tag1 =
9935                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9936                 vport->txvlan_cfg.insert_tag1_en = true;
9937                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9938                                                  vlan_tag;
9939         }
9940
9941         vport->txvlan_cfg.accept_untag1 = true;
9942
9943         /* accept_tag2 and accept_untag2 are not supported on
9944          * pdev revision(0x20); newer revisions support them, but
9945          * these two fields cannot be configured by the user.
9946          */
9947         vport->txvlan_cfg.accept_tag2 = true;
9948         vport->txvlan_cfg.accept_untag2 = true;
9949         vport->txvlan_cfg.insert_tag2_en = false;
9950         vport->txvlan_cfg.default_tag2 = 0;
9951         vport->txvlan_cfg.tag_shift_mode_en = true;
9952
9953         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9954                 vport->rxvlan_cfg.strip_tag1_en = false;
9955                 vport->rxvlan_cfg.strip_tag2_en =
9956                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9957                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9958         } else {
9959                 vport->rxvlan_cfg.strip_tag1_en =
9960                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9961                 vport->rxvlan_cfg.strip_tag2_en = true;
9962                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9963         }
9964
9965         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9966         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9967         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9968
9969         ret = hclge_set_vlan_tx_offload_cfg(vport);
9970         if (ret)
9971                 return ret;
9972
9973         return hclge_set_vlan_rx_offload_cfg(vport);
9974 }
9975
9976 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9977 {
9978         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9979         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9980         struct hclge_desc desc;
9981         int status;
9982
9983         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9984         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9985         rx_req->ot_fst_vlan_type =
9986                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9987         rx_req->ot_sec_vlan_type =
9988                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9989         rx_req->in_fst_vlan_type =
9990                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9991         rx_req->in_sec_vlan_type =
9992                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9993
9994         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9995         if (status) {
9996                 dev_err(&hdev->pdev->dev,
9997                         "Send rxvlan protocol type command fail, ret =%d\n",
9998                         status);
9999                 return status;
10000         }
10001
10002         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10003
10004         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10005         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10006         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10007
10008         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10009         if (status)
10010                 dev_err(&hdev->pdev->dev,
10011                         "Send txvlan protocol type command fail, ret =%d\n",
10012                         status);
10013
10014         return status;
10015 }
10016
10017 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10018 {
10019 #define HCLGE_DEF_VLAN_TYPE             0x8100
10020
10021         struct hnae3_handle *handle = &hdev->vport[0].nic;
10022         struct hclge_vport *vport;
10023         int ret;
10024         int i;
10025
10026         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10027                 /* for revision 0x21, vf vlan filter is per function */
10028                 for (i = 0; i < hdev->num_alloc_vport; i++) {
10029                         vport = &hdev->vport[i];
10030                         ret = hclge_set_vlan_filter_ctrl(hdev,
10031                                                          HCLGE_FILTER_TYPE_VF,
10032                                                          HCLGE_FILTER_FE_EGRESS,
10033                                                          true,
10034                                                          vport->vport_id);
10035                         if (ret)
10036                                 return ret;
10037                         vport->cur_vlan_fltr_en = true;
10038                 }
10039
10040                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10041                                                  HCLGE_FILTER_FE_INGRESS, true,
10042                                                  0);
10043                 if (ret)
10044                         return ret;
10045         } else {
10046                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10047                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
10048                                                  true, 0);
10049                 if (ret)
10050                         return ret;
10051         }
10052
10053         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10054         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10055         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10058         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10059
10060         ret = hclge_set_vlan_protocol_type(hdev);
10061         if (ret)
10062                 return ret;
10063
10064         for (i = 0; i < hdev->num_alloc_vport; i++) {
10065                 u16 vlan_tag;
10066                 u8 qos;
10067
10068                 vport = &hdev->vport[i];
10069                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10070                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10071
10072                 ret = hclge_vlan_offload_cfg(vport,
10073                                              vport->port_base_vlan_cfg.state,
10074                                              vlan_tag, qos);
10075                 if (ret)
10076                         return ret;
10077         }
10078
10079         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10080 }
10081
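/* Track @vlan_id in the vport's software vlan list; @writen_to_tbl records
 * whether the vlan has already been written to the hardware vlan filter
 * table (stored as hd_tbl_status).
 */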
10082 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10083                                        bool writen_to_tbl)
10084 {
10085         struct hclge_vport_vlan_cfg *vlan, *tmp;
10086
10087         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10088                 if (vlan->vlan_id == vlan_id)
10089                         return;
10090
10091         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10092         if (!vlan)
10093                 return;
10094
10095         vlan->hd_tbl_status = writen_to_tbl;
10096         vlan->vlan_id = vlan_id;
10097
10098         list_add_tail(&vlan->node, &vport->vlan_list);
10099 }
10100
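/* Write every vlan in the vport's software list that is not yet in hardware
 * into the vlan filter table, e.g. when restoring entries after the port
 * base vlan is removed.
 */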
10101 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10102 {
10103         struct hclge_vport_vlan_cfg *vlan, *tmp;
10104         struct hclge_dev *hdev = vport->back;
10105         int ret;
10106
10107         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10108                 if (!vlan->hd_tbl_status) {
10109                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10110                                                        vport->vport_id,
10111                                                        vlan->vlan_id, false);
10112                         if (ret) {
10113                                 dev_err(&hdev->pdev->dev,
10114                                         "restore vport vlan list failed, ret=%d\n",
10115                                         ret);
10116                                 return ret;
10117                         }
10118                 }
10119                 vlan->hd_tbl_status = true;
10120         }
10121
10122         return 0;
10123 }
10124
10125 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10126                                       bool is_write_tbl)
10127 {
10128         struct hclge_vport_vlan_cfg *vlan, *tmp;
10129         struct hclge_dev *hdev = vport->back;
10130
10131         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10132                 if (vlan->vlan_id == vlan_id) {
10133                         if (is_write_tbl && vlan->hd_tbl_status)
10134                                 hclge_set_vlan_filter_hw(hdev,
10135                                                          htons(ETH_P_8021Q),
10136                                                          vport->vport_id,
10137                                                          vlan_id,
10138                                                          true);
10139
10140                         list_del(&vlan->node);
10141                         kfree(vlan);
10142                         break;
10143                 }
10144         }
10145 }
10146
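/* Remove all of the vport's vlans from the hardware vlan filter table;
 * when @is_del_list is true the software vlan list entries are freed as
 * well, otherwise they are kept with hd_tbl_status cleared so they can be
 * re-added later.
 */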
10147 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10148 {
10149         struct hclge_vport_vlan_cfg *vlan, *tmp;
10150         struct hclge_dev *hdev = vport->back;
10151
10152         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10153                 if (vlan->hd_tbl_status)
10154                         hclge_set_vlan_filter_hw(hdev,
10155                                                  htons(ETH_P_8021Q),
10156                                                  vport->vport_id,
10157                                                  vlan->vlan_id,
10158                                                  true);
10159
10160                 vlan->hd_tbl_status = false;
10161                 if (is_del_list) {
10162                         list_del(&vlan->node);
10163                         kfree(vlan);
10164                 }
10165         }
10166         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10167 }
10168
10169 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10170 {
10171         struct hclge_vport_vlan_cfg *vlan, *tmp;
10172         struct hclge_vport *vport;
10173         int i;
10174
10175         for (i = 0; i < hdev->num_alloc_vport; i++) {
10176                 vport = &hdev->vport[i];
10177                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10178                         list_del(&vlan->node);
10179                         kfree(vlan);
10180                 }
10181         }
10182 }
10183
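/* Re-program the vport's vlan filter entries after they have been cleared,
 * e.g. by a reset: either the single port base vlan, or every vlan in the
 * vport's software vlan list.
 */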
10184 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10185 {
10186         struct hclge_vport_vlan_cfg *vlan, *tmp;
10187         struct hclge_dev *hdev = vport->back;
10188         u16 vlan_proto;
10189         u16 vlan_id;
10190         u16 state;
10191         int ret;
10192
10193         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10194         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10195         state = vport->port_base_vlan_cfg.state;
10196
10197         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10198                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10199                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10200                                          vport->vport_id, vlan_id,
10201                                          false);
10202                 return;
10203         }
10204
10205         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10206                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10207                                                vport->vport_id,
10208                                                vlan->vlan_id, false);
10209                 if (ret)
10210                         break;
10211                 vlan->hd_tbl_status = true;
10212         }
10213 }
10214
10215 /* For global reset and imp reset, hardware will clear the mac table,
10216  * so we change the mac address state from ACTIVE to TO_ADD so that they
10217  * can be restored in the service task after the reset completes. Furthermore,
10218  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
10219  * after reset, so just remove these mac nodes from mac_list.
10220  */
10221 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10222 {
10223         struct hclge_mac_node *mac_node, *tmp;
10224
10225         list_for_each_entry_safe(mac_node, tmp, list, node) {
10226                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10227                         mac_node->state = HCLGE_MAC_TO_ADD;
10228                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10229                         list_del(&mac_node->node);
10230                         kfree(mac_node);
10231                 }
10232         }
10233 }
10234
10235 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10236 {
10237         spin_lock_bh(&vport->mac_list_lock);
10238
10239         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10240         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10241         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10242
10243         spin_unlock_bh(&vport->mac_list_lock);
10244 }
10245
10246 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10247 {
10248         struct hclge_vport *vport = &hdev->vport[0];
10249         struct hnae3_handle *handle = &vport->nic;
10250
10251         hclge_restore_mac_table_common(vport);
10252         hclge_restore_vport_vlan_table(vport);
10253         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10254         hclge_restore_fd_entries(handle);
10255 }
10256
10257 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10258 {
10259         struct hclge_vport *vport = hclge_get_vport(handle);
10260
10261         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10262                 vport->rxvlan_cfg.strip_tag1_en = false;
10263                 vport->rxvlan_cfg.strip_tag2_en = enable;
10264                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10265         } else {
10266                 vport->rxvlan_cfg.strip_tag1_en = enable;
10267                 vport->rxvlan_cfg.strip_tag2_en = true;
10268                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10269         }
10270
10271         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10272         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10273         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10274         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10275
10276         return hclge_set_vlan_rx_offload_cfg(vport);
10277 }
10278
10279 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10280 {
10281         struct hclge_dev *hdev = vport->back;
10282
10283         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10284                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10285 }
10286
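/* Switch the hardware vlan filter entries between "port base vlan" mode
 * (a single entry for the port vlan, with vlan 0 removed) and normal mode
 * (vlan 0 plus the entries from the vport's vlan list).
 */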
10287 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10288                                             u16 port_base_vlan_state,
10289                                             struct hclge_vlan_info *new_info,
10290                                             struct hclge_vlan_info *old_info)
10291 {
10292         struct hclge_dev *hdev = vport->back;
10293         int ret;
10294
10295         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10296                 hclge_rm_vport_all_vlan_table(vport, false);
10297                 /* force clear VLAN 0 */
10298                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10299                 if (ret)
10300                         return ret;
10301                 return hclge_set_vlan_filter_hw(hdev,
10302                                                  htons(new_info->vlan_proto),
10303                                                  vport->vport_id,
10304                                                  new_info->vlan_tag,
10305                                                  false);
10306         }
10307
10308         /* force add VLAN 0 */
10309         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10310         if (ret)
10311                 return ret;
10312
10313         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10314                                        vport->vport_id, old_info->vlan_tag,
10315                                        true);
10316         if (ret)
10317                 return ret;
10318
10319         return hclge_add_vport_all_vlan_table(vport);
10320 }
10321
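/* The hardware vlan filter entries only need rewriting when the vlan tag
 * changes, or when the tag is 0 and either the old or new qos is 0, i.e.
 * a priority-only tag is being added or removed.
 */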
10322 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10323                                           const struct hclge_vlan_info *old_cfg)
10324 {
10325         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10326                 return true;
10327
10328         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10329                 return true;
10330
10331         return false;
10332 }
10333
10334 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10335                                     struct hclge_vlan_info *vlan_info)
10336 {
10337         struct hnae3_handle *nic = &vport->nic;
10338         struct hclge_vlan_info *old_vlan_info;
10339         struct hclge_dev *hdev = vport->back;
10340         int ret;
10341
10342         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10343
10344         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10345                                      vlan_info->qos);
10346         if (ret)
10347                 return ret;
10348
10349         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10350                 goto out;
10351
10352         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10353                 /* add new VLAN tag */
10354                 ret = hclge_set_vlan_filter_hw(hdev,
10355                                                htons(vlan_info->vlan_proto),
10356                                                vport->vport_id,
10357                                                vlan_info->vlan_tag,
10358                                                false);
10359                 if (ret)
10360                         return ret;
10361
10362                 /* remove old VLAN tag */
10363                 if (old_vlan_info->vlan_tag == 0)
10364                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10365                                                        true, 0);
10366                 else
10367                         ret = hclge_set_vlan_filter_hw(hdev,
10368                                                        htons(ETH_P_8021Q),
10369                                                        vport->vport_id,
10370                                                        old_vlan_info->vlan_tag,
10371                                                        true);
10372                 if (ret) {
10373                         dev_err(&hdev->pdev->dev,
10374                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10375                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10376                         return ret;
10377                 }
10378
10379                 goto out;
10380         }
10381
10382         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10383                                                old_vlan_info);
10384         if (ret)
10385                 return ret;
10386
10387 out:
10388         vport->port_base_vlan_cfg.state = state;
10389         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10390                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10391         else
10392                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10393
10394         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10395         hclge_set_vport_vlan_fltr_change(vport);
10396
10397         return 0;
10398 }
10399
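/* Decide how a requested (vlan, qos) pair changes the vport's port base
 * vlan state: enable it, disable it, modify the existing tag, or leave it
 * unchanged.
 */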
10400 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10401                                           enum hnae3_port_base_vlan_state state,
10402                                           u16 vlan, u8 qos)
10403 {
10404         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10405                 if (!vlan && !qos)
10406                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10407
10408                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10409         }
10410
10411         if (!vlan && !qos)
10412                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10413
10414         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10415             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10416                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10417
10418         return HNAE3_PORT_BASE_VLAN_MODIFY;
10419 }
10420
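/* PF-side handler for configuring a VF's port base vlan (vlan + qos):
 * validate the request, work out the resulting port base vlan state and,
 * for device versions before V3, push the new state to the VF if it is
 * alive.
 */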
10421 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10422                                     u16 vlan, u8 qos, __be16 proto)
10423 {
10424         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10425         struct hclge_vport *vport = hclge_get_vport(handle);
10426         struct hclge_dev *hdev = vport->back;
10427         struct hclge_vlan_info vlan_info;
10428         u16 state;
10429         int ret;
10430
10431         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10432                 return -EOPNOTSUPP;
10433
10434         vport = hclge_get_vf_vport(hdev, vfid);
10435         if (!vport)
10436                 return -EINVAL;
10437
10438                 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10439         if (vlan > VLAN_N_VID - 1 || qos > 7)
10440                 return -EINVAL;
10441         if (proto != htons(ETH_P_8021Q))
10442                 return -EPROTONOSUPPORT;
10443
10444         state = hclge_get_port_base_vlan_state(vport,
10445                                                vport->port_base_vlan_cfg.state,
10446                                                vlan, qos);
10447         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10448                 return 0;
10449
10450         vlan_info.vlan_tag = vlan;
10451         vlan_info.qos = qos;
10452         vlan_info.vlan_proto = ntohs(proto);
10453
10454         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10455         if (ret) {
10456                 dev_err(&hdev->pdev->dev,
10457                         "failed to update port base vlan for vf %d, ret = %d\n",
10458                         vfid, ret);
10459                 return ret;
10460         }
10461
10462         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10463          * VLAN state.
10464          */
10465         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10466             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10467                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10468                                                   vport->vport_id, state,
10469                                                   &vlan_info);
10470
10471         return 0;
10472 }
10473
10474 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10475 {
10476         struct hclge_vlan_info *vlan_info;
10477         struct hclge_vport *vport;
10478         int ret;
10479         int vf;
10480
10481         /* clear the port base vlan for all VFs */
10482         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10483                 vport = &hdev->vport[vf];
10484                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10485
10486                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10487                                                vport->vport_id,
10488                                                vlan_info->vlan_tag, true);
10489                 if (ret)
10490                         dev_err(&hdev->pdev->dev,
10491                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10492                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10493         }
10494 }
10495
10496 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10497                           u16 vlan_id, bool is_kill)
10498 {
10499         struct hclge_vport *vport = hclge_get_vport(handle);
10500         struct hclge_dev *hdev = vport->back;
10501         bool writen_to_tbl = false;
10502         int ret = 0;
10503
10504         /* When the device is resetting or reset has failed, firmware is
10505          * unable to handle the mailbox. Just record the vlan id, and remove
10506          * it after the reset finishes.
10507          */
10508         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10509              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10510                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10511                 return -EBUSY;
10512         }
10513
10514         /* When port base vlan is enabled, we use the port base vlan as the
10515          * vlan filter entry. In this case, we don't update the vlan filter
10516          * table when the user adds or removes a vlan, we just update the
10517          * vport vlan list. The vlan ids in the vlan list are only written to
10518          * the vlan filter table once port base vlan is disabled.
10519          */
10520         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10521                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10522                                                vlan_id, is_kill);
10523                 writen_to_tbl = true;
10524         }
10525
10526         if (!ret) {
10527                 if (is_kill)
10528                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10529                 else
10530                         hclge_add_vport_vlan_table(vport, vlan_id,
10531                                                    writen_to_tbl);
10532         } else if (is_kill) {
10533                 /* when removing the hw vlan filter fails, record the vlan
10534                  * id and try to remove it from hw later, to stay consistent
10535                  * with the stack
10536                  */
10537                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10538         }
10539
10540         hclge_set_vport_vlan_fltr_change(vport);
10541
10542         return ret;
10543 }
10544
10545 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10546 {
10547         struct hclge_vport *vport;
10548         int ret;
10549         u16 i;
10550
10551         for (i = 0; i < hdev->num_alloc_vport; i++) {
10552                 vport = &hdev->vport[i];
10553                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10554                                         &vport->state))
10555                         continue;
10556
10557                 ret = hclge_enable_vport_vlan_filter(vport,
10558                                                      vport->req_vlan_fltr_en);
10559                 if (ret) {
10560                         dev_err(&hdev->pdev->dev,
10561                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10562                                 vport->vport_id, ret);
10563                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10564                                 &vport->state);
10565                         return;
10566                 }
10567         }
10568 }
10569
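/* Service-task helper: retry deleting any vlans whose hardware removal
 * failed earlier (recorded in vlan_del_fail_bmap), bounded by
 * HCLGE_MAX_SYNC_COUNT deletions per invocation, then sync the per-vport
 * vlan filter enable state.
 */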
10570 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10571 {
10572 #define HCLGE_MAX_SYNC_COUNT    60
10573
10574         int i, ret, sync_cnt = 0;
10575         u16 vlan_id;
10576
10577         /* retry failed vlan deletions for every vport, including the PF */
10578         for (i = 0; i < hdev->num_alloc_vport; i++) {
10579                 struct hclge_vport *vport = &hdev->vport[i];
10580
10581                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10582                                          VLAN_N_VID);
10583                 while (vlan_id != VLAN_N_VID) {
10584                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10585                                                        vport->vport_id, vlan_id,
10586                                                        true);
10587                         if (ret && ret != -EINVAL)
10588                                 return;
10589
10590                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10591                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10592                         hclge_set_vport_vlan_fltr_change(vport);
10593
10594                         sync_cnt++;
10595                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10596                                 return;
10597
10598                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10599                                                  VLAN_N_VID);
10600                 }
10601         }
10602
10603         hclge_sync_vlan_fltr_state(hdev);
10604 }
10605
10606 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10607 {
10608         struct hclge_config_max_frm_size_cmd *req;
10609         struct hclge_desc desc;
10610
10611         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10612
10613         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10614         req->max_frm_size = cpu_to_le16(new_mps);
10615         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10616
10617         return hclge_cmd_send(&hdev->hw, &desc, 1);
10618 }
10619
10620 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10621 {
10622         struct hclge_vport *vport = hclge_get_vport(handle);
10623
10624         return hclge_set_vport_mtu(vport, new_mtu);
10625 }
10626
10627 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10628 {
10629         struct hclge_dev *hdev = vport->back;
10630         int i, max_frm_size, ret;
10631
10632         /* HW supports 2 layers of vlan */
10633         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10634         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10635             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10636                 return -EINVAL;
10637
10638         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10639         mutex_lock(&hdev->vport_lock);
10640         /* VF's mps must fit within hdev->mps */
10641         if (vport->vport_id && max_frm_size > hdev->mps) {
10642                 mutex_unlock(&hdev->vport_lock);
10643                 return -EINVAL;
10644         } else if (vport->vport_id) {
10645                 vport->mps = max_frm_size;
10646                 mutex_unlock(&hdev->vport_lock);
10647                 return 0;
10648         }
10649
10650         /* PF's mps must be greater than VF's mps */
10651         for (i = 1; i < hdev->num_alloc_vport; i++)
10652                 if (max_frm_size < hdev->vport[i].mps) {
10653                         mutex_unlock(&hdev->vport_lock);
10654                         return -EINVAL;
10655                 }
10656
10657         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10658
10659         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10660         if (ret) {
10661                 dev_err(&hdev->pdev->dev,
10662                         "Change mtu fail, ret =%d\n", ret);
10663                 goto out;
10664         }
10665
10666         hdev->mps = max_frm_size;
10667         vport->mps = max_frm_size;
10668
10669         ret = hclge_buffer_alloc(hdev);
10670         if (ret)
10671                 dev_err(&hdev->pdev->dev,
10672                         "Allocate buffer fail, ret =%d\n", ret);
10673
10674 out:
10675         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10676         mutex_unlock(&hdev->vport_lock);
10677         return ret;
10678 }
10679
10680 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10681                                     bool enable)
10682 {
10683         struct hclge_reset_tqp_queue_cmd *req;
10684         struct hclge_desc desc;
10685         int ret;
10686
10687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10688
10689         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10690         req->tqp_id = cpu_to_le16(queue_id);
10691         if (enable)
10692                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10693
10694         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10695         if (ret) {
10696                 dev_err(&hdev->pdev->dev,
10697                         "Send tqp reset cmd error, status =%d\n", ret);
10698                 return ret;
10699         }
10700
10701         return 0;
10702 }
10703
10704 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10705 {
10706         struct hclge_reset_tqp_queue_cmd *req;
10707         struct hclge_desc desc;
10708         int ret;
10709
10710         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10711
10712         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10713         req->tqp_id = cpu_to_le16(queue_id);
10714
10715         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10716         if (ret) {
10717                 dev_err(&hdev->pdev->dev,
10718                         "Get reset status error, status =%d\n", ret);
10719                 return ret;
10720         }
10721
10722         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10723 }
10724
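/* Convert a handle-local queue id into the global tqp index used by the
 * queue reset commands.
 */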
10725 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10726 {
10727         struct hnae3_queue *queue;
10728         struct hclge_tqp *tqp;
10729
10730         queue = handle->kinfo.tqp[queue_id];
10731         tqp = container_of(queue, struct hclge_tqp, q);
10732
10733         return tqp->index;
10734 }
10735
10736 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10737 {
10738         struct hclge_vport *vport = hclge_get_vport(handle);
10739         struct hclge_dev *hdev = vport->back;
10740         u16 reset_try_times = 0;
10741         int reset_status;
10742         u16 queue_gid;
10743         int ret;
10744         u16 i;
10745
10746         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10747                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10748                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10749                 if (ret) {
10750                         dev_err(&hdev->pdev->dev,
10751                                 "failed to send reset tqp cmd, ret = %d\n",
10752                                 ret);
10753                         return ret;
10754                 }
10755
10756                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10757                         reset_status = hclge_get_reset_status(hdev, queue_gid);
10758                         if (reset_status)
10759                                 break;
10760
10761                         /* Wait for tqp hw reset */
10762                         usleep_range(1000, 1200);
10763                 }
10764
10765                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10766                         dev_err(&hdev->pdev->dev,
10767                                 "wait for tqp hw reset timeout\n");
10768                         return -ETIME;
10769                 }
10770
10771                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10772                 if (ret) {
10773                         dev_err(&hdev->pdev->dev,
10774                                 "failed to deassert soft reset, ret = %d\n",
10775                                 ret);
10776                         return ret;
10777                 }
10778                 reset_try_times = 0;
10779         }
10780         return 0;
10781 }
10782
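/* Trigger an RCB reset covering all of the handle's tqps in one command.
 * If firmware reports the command is not supported, fall back to resetting
 * each tqp individually via hclge_reset_tqp_cmd().
 */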
10783 static int hclge_reset_rcb(struct hnae3_handle *handle)
10784 {
10785 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10786 #define HCLGE_RESET_RCB_SUCCESS         1U
10787
10788         struct hclge_vport *vport = hclge_get_vport(handle);
10789         struct hclge_dev *hdev = vport->back;
10790         struct hclge_reset_cmd *req;
10791         struct hclge_desc desc;
10792         u8 return_status;
10793         u16 queue_gid;
10794         int ret;
10795
10796         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10797
10798         req = (struct hclge_reset_cmd *)desc.data;
10799         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10800         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10801         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10802         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10803
10804         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10805         if (ret) {
10806                 dev_err(&hdev->pdev->dev,
10807                         "failed to send rcb reset cmd, ret = %d\n", ret);
10808                 return ret;
10809         }
10810
10811         return_status = req->fun_reset_rcb_return_status;
10812         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10813                 return 0;
10814
10815         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10816                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10817                         return_status);
10818                 return -EIO;
10819         }
10820
10821         /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10822          * again to reset all tqps
10823          */
10824         return hclge_reset_tqp_cmd(handle);
10825 }
10826
10827 int hclge_reset_tqp(struct hnae3_handle *handle)
10828 {
10829         struct hclge_vport *vport = hclge_get_vport(handle);
10830         struct hclge_dev *hdev = vport->back;
10831         int ret;
10832
10833         /* only need to disable PF's tqp */
10834         if (!vport->vport_id) {
10835                 ret = hclge_tqp_enable(handle, false);
10836                 if (ret) {
10837                         dev_err(&hdev->pdev->dev,
10838                                 "failed to disable tqp, ret = %d\n", ret);
10839                         return ret;
10840                 }
10841         }
10842
10843         return hclge_reset_rcb(handle);
10844 }
10845
10846 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10847 {
10848         struct hclge_vport *vport = hclge_get_vport(handle);
10849         struct hclge_dev *hdev = vport->back;
10850
10851         return hdev->fw_version;
10852 }
10853
10854 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10855 {
10856         struct phy_device *phydev = hdev->hw.mac.phydev;
10857
10858         if (!phydev)
10859                 return;
10860
10861         phy_set_asym_pause(phydev, rx_en, tx_en);
10862 }
10863
10864 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10865 {
10866         int ret;
10867
10868         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10869                 return 0;
10870
10871         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10872         if (ret)
10873                 dev_err(&hdev->pdev->dev,
10874                         "configure pauseparam error, ret = %d.\n", ret);
10875
10876         return ret;
10877 }
10878
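/* Resolve the pause configuration from the local and link partner autoneg
 * advertisements (half duplex disables pause) and apply it to the MAC.
 */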
10879 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10880 {
10881         struct phy_device *phydev = hdev->hw.mac.phydev;
10882         u16 remote_advertising = 0;
10883         u16 local_advertising;
10884         u32 rx_pause, tx_pause;
10885         u8 flowctl;
10886
10887         if (!phydev->link || !phydev->autoneg)
10888                 return 0;
10889
10890         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10891
10892         if (phydev->pause)
10893                 remote_advertising = LPA_PAUSE_CAP;
10894
10895         if (phydev->asym_pause)
10896                 remote_advertising |= LPA_PAUSE_ASYM;
10897
10898         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10899                                            remote_advertising);
10900         tx_pause = flowctl & FLOW_CTRL_TX;
10901         rx_pause = flowctl & FLOW_CTRL_RX;
10902
10903         if (phydev->duplex == HCLGE_MAC_HALF) {
10904                 tx_pause = 0;
10905                 rx_pause = 0;
10906         }
10907
10908         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10909 }
10910
10911 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10912                                  u32 *rx_en, u32 *tx_en)
10913 {
10914         struct hclge_vport *vport = hclge_get_vport(handle);
10915         struct hclge_dev *hdev = vport->back;
10916         u8 media_type = hdev->hw.mac.media_type;
10917
10918         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10919                     hclge_get_autoneg(handle) : 0;
10920
10921         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10922                 *rx_en = 0;
10923                 *tx_en = 0;
10924                 return;
10925         }
10926
10927         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10928                 *rx_en = 1;
10929                 *tx_en = 0;
10930         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10931                 *tx_en = 1;
10932                 *rx_en = 0;
10933         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10934                 *rx_en = 1;
10935                 *tx_en = 1;
10936         } else {
10937                 *rx_en = 0;
10938                 *tx_en = 0;
10939         }
10940 }
10941
10942 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10943                                          u32 rx_en, u32 tx_en)
10944 {
10945         if (rx_en && tx_en)
10946                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10947         else if (rx_en && !tx_en)
10948                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10949         else if (!rx_en && tx_en)
10950                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10951         else
10952                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10953
10954         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10955 }
10956
10957 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10958                                 u32 rx_en, u32 tx_en)
10959 {
10960         struct hclge_vport *vport = hclge_get_vport(handle);
10961         struct hclge_dev *hdev = vport->back;
10962         struct phy_device *phydev = hdev->hw.mac.phydev;
10963         u32 fc_autoneg;
10964
10965         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10966                 fc_autoneg = hclge_get_autoneg(handle);
10967                 if (auto_neg != fc_autoneg) {
10968                         dev_info(&hdev->pdev->dev,
10969                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10970                         return -EOPNOTSUPP;
10971                 }
10972         }
10973
10974         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10975                 dev_info(&hdev->pdev->dev,
10976                          "Priority flow control enabled. Cannot set link flow control.\n");
10977                 return -EOPNOTSUPP;
10978         }
10979
10980         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10981
10982         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10983
10984         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10985                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10986
10987         if (phydev)
10988                 return phy_start_aneg(phydev);
10989
10990         return -EOPNOTSUPP;
10991 }
10992
10993 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10994                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10995 {
10996         struct hclge_vport *vport = hclge_get_vport(handle);
10997         struct hclge_dev *hdev = vport->back;
10998
10999         if (speed)
11000                 *speed = hdev->hw.mac.speed;
11001         if (duplex)
11002                 *duplex = hdev->hw.mac.duplex;
11003         if (auto_neg)
11004                 *auto_neg = hdev->hw.mac.autoneg;
11005 }
11006
11007 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11008                                  u8 *module_type)
11009 {
11010         struct hclge_vport *vport = hclge_get_vport(handle);
11011         struct hclge_dev *hdev = vport->back;
11012
11013         /* When the nic is down, the service task is not running and doesn't
11014          * update the port information every second. Query the port info before
11015          * returning the media type to ensure the media information is correct.
11016          */
11017         hclge_update_port_info(hdev);
11018
11019         if (media_type)
11020                 *media_type = hdev->hw.mac.media_type;
11021
11022         if (module_type)
11023                 *module_type = hdev->hw.mac.module_type;
11024 }
11025
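/* Report the MDI-X control and status of the external PHY by temporarily
 * switching to the MDIX register page and restoring the copper page
 * afterwards; without a PHY both values are reported as invalid.
 */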
11026 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11027                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11028 {
11029         struct hclge_vport *vport = hclge_get_vport(handle);
11030         struct hclge_dev *hdev = vport->back;
11031         struct phy_device *phydev = hdev->hw.mac.phydev;
11032         int mdix_ctrl, mdix, is_resolved;
11033         unsigned int retval;
11034
11035         if (!phydev) {
11036                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11037                 *tp_mdix = ETH_TP_MDI_INVALID;
11038                 return;
11039         }
11040
11041         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11042
11043         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11044         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11045                                     HCLGE_PHY_MDIX_CTRL_S);
11046
11047         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11048         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11049         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11050
11051         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11052
11053         switch (mdix_ctrl) {
11054         case 0x0:
11055                 *tp_mdix_ctrl = ETH_TP_MDI;
11056                 break;
11057         case 0x1:
11058                 *tp_mdix_ctrl = ETH_TP_MDI_X;
11059                 break;
11060         case 0x3:
11061                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11062                 break;
11063         default:
11064                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11065                 break;
11066         }
11067
11068         if (!is_resolved)
11069                 *tp_mdix = ETH_TP_MDI_INVALID;
11070         else if (mdix)
11071                 *tp_mdix = ETH_TP_MDI_X;
11072         else
11073                 *tp_mdix = ETH_TP_MDI;
11074 }
11075
11076 static void hclge_info_show(struct hclge_dev *hdev)
11077 {
11078         struct device *dev = &hdev->pdev->dev;
11079
11080         dev_info(dev, "PF info begin:\n");
11081
11082         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11083         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11084         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11085         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11086         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11087         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11088         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11089         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11090         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11091         dev_info(dev, "This is %s PF\n",
11092                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11093         dev_info(dev, "DCB %s\n",
11094                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11095         dev_info(dev, "MQPRIO %s\n",
11096                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11097         dev_info(dev, "Default tx spare buffer size: %u\n",
11098                  hdev->tx_spare_buf_size);
11099
11100         dev_info(dev, "PF info end.\n");
11101 }
11102
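/* Initialize the NIC client instance. If a reset starts while init_instance()
 * is running (detected via the reset handling bit or a reset counter change),
 * wait for the reset to finish, uninitialize the instance again and return
 * -EBUSY.
 */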
11103 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11104                                           struct hclge_vport *vport)
11105 {
11106         struct hnae3_client *client = vport->nic.client;
11107         struct hclge_dev *hdev = ae_dev->priv;
11108         int rst_cnt = hdev->rst_stats.reset_cnt;
11109         int ret;
11110
11111         ret = client->ops->init_instance(&vport->nic);
11112         if (ret)
11113                 return ret;
11114
11115         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11116         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11117             rst_cnt != hdev->rst_stats.reset_cnt) {
11118                 ret = -EBUSY;
11119                 goto init_nic_err;
11120         }
11121
11122         /* Enable nic hw error interrupts */
11123         ret = hclge_config_nic_hw_error(hdev, true);
11124         if (ret) {
11125                 dev_err(&ae_dev->pdev->dev,
11126                         "fail(%d) to enable hw error interrupts\n", ret);
11127                 goto init_nic_err;
11128         }
11129
11130         hnae3_set_client_init_flag(client, ae_dev, 1);
11131
11132         if (netif_msg_drv(&hdev->vport->nic))
11133                 hclge_info_show(hdev);
11134
11135         return ret;
11136
11137 init_nic_err:
11138         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11139         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11140                 msleep(HCLGE_WAIT_RESET_DONE);
11141
11142         client->ops->uninit_instance(&vport->nic, 0);
11143
11144         return ret;
11145 }
11146
11147 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11148                                            struct hclge_vport *vport)
11149 {
11150         struct hclge_dev *hdev = ae_dev->priv;
11151         struct hnae3_client *client;
11152         int rst_cnt;
11153         int ret;
11154
11155         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11156             !hdev->nic_client)
11157                 return 0;
11158
11159         client = hdev->roce_client;
11160         ret = hclge_init_roce_base_info(vport);
11161         if (ret)
11162                 return ret;
11163
11164         rst_cnt = hdev->rst_stats.reset_cnt;
11165         ret = client->ops->init_instance(&vport->roce);
11166         if (ret)
11167                 return ret;
11168
11169         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11170         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11171             rst_cnt != hdev->rst_stats.reset_cnt) {
11172                 ret = -EBUSY;
11173                 goto init_roce_err;
11174         }
11175
11176         /* Enable roce ras interrupts */
11177         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11178         if (ret) {
11179                 dev_err(&ae_dev->pdev->dev,
11180                         "fail(%d) to enable roce ras interrupts\n", ret);
11181                 goto init_roce_err;
11182         }
11183
11184         hnae3_set_client_init_flag(client, ae_dev, 1);
11185
11186         return 0;
11187
11188 init_roce_err:
11189         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11190         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11191                 msleep(HCLGE_WAIT_RESET_DONE);
11192
11193         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11194
11195         return ret;
11196 }
11197
11198 static int hclge_init_client_instance(struct hnae3_client *client,
11199                                       struct hnae3_ae_dev *ae_dev)
11200 {
11201         struct hclge_dev *hdev = ae_dev->priv;
11202         struct hclge_vport *vport = &hdev->vport[0];
11203         int ret;
11204
11205         switch (client->type) {
11206         case HNAE3_CLIENT_KNIC:
11207                 hdev->nic_client = client;
11208                 vport->nic.client = client;
11209                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11210                 if (ret)
11211                         goto clear_nic;
11212
11213                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11214                 if (ret)
11215                         goto clear_roce;
11216
11217                 break;
11218         case HNAE3_CLIENT_ROCE:
11219                 if (hnae3_dev_roce_supported(hdev)) {
11220                         hdev->roce_client = client;
11221                         vport->roce.client = client;
11222                 }
11223
11224                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11225                 if (ret)
11226                         goto clear_roce;
11227
11228                 break;
11229         default:
11230                 return -EINVAL;
11231         }
11232
11233         return 0;
11234
11235 clear_nic:
11236         hdev->nic_client = NULL;
11237         vport->nic.client = NULL;
11238         return ret;
11239 clear_roce:
11240         hdev->roce_client = NULL;
11241         vport->roce.client = NULL;
11242         return ret;
11243 }
11244
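/* Uninitialize the client instances: the RoCE client is torn down before the
 * NIC client, and in both cases any in-progress reset is waited for before
 * calling the client's uninit_instance() callback.
 */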
11245 static void hclge_uninit_client_instance(struct hnae3_client *client,
11246                                          struct hnae3_ae_dev *ae_dev)
11247 {
11248         struct hclge_dev *hdev = ae_dev->priv;
11249         struct hclge_vport *vport = &hdev->vport[0];
11250
11251         if (hdev->roce_client) {
11252                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11253                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11254                         msleep(HCLGE_WAIT_RESET_DONE);
11255
11256                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11257                 hdev->roce_client = NULL;
11258                 vport->roce.client = NULL;
11259         }
11260         if (client->type == HNAE3_CLIENT_ROCE)
11261                 return;
11262         if (hdev->nic_client && client->ops->uninit_instance) {
11263                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11264                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11265                         msleep(HCLGE_WAIT_RESET_DONE);
11266
11267                 client->ops->uninit_instance(&vport->nic, 0);
11268                 hdev->nic_client = NULL;
11269                 vport->nic.client = NULL;
11270         }
11271 }
11272
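/* Map the optional device-memory BAR (BAR 4) with write combining. Devices
 * that do not expose this BAR return without mapping anything.
 */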
11273 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11274 {
11275 #define HCLGE_MEM_BAR           4
11276
11277         struct pci_dev *pdev = hdev->pdev;
11278         struct hclge_hw *hw = &hdev->hw;
11279
11280         /* for a device that does not have device memory, return directly */
11281         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11282                 return 0;
11283
11284         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11285                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11286                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11287         if (!hw->mem_base) {
11288                 dev_err(&pdev->dev, "failed to map device memory\n");
11289                 return -EFAULT;
11290         }
11291
11292         return 0;
11293 }
11294
11295 static int hclge_pci_init(struct hclge_dev *hdev)
11296 {
11297         struct pci_dev *pdev = hdev->pdev;
11298         struct hclge_hw *hw;
11299         int ret;
11300
11301         ret = pci_enable_device(pdev);
11302         if (ret) {
11303                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11304                 return ret;
11305         }
11306
11307         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11308         if (ret) {
11309                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11310                 if (ret) {
11311                         dev_err(&pdev->dev,
11312                                 "can't set consistent PCI DMA\n");
11313                         goto err_disable_device;
11314                 }
11315                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11316         }
11317
11318         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11319         if (ret) {
11320                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11321                 goto err_disable_device;
11322         }
11323
11324         pci_set_master(pdev);
11325         hw = &hdev->hw;
11326         hw->io_base = pcim_iomap(pdev, 2, 0);
11327         if (!hw->io_base) {
11328                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11329                 ret = -ENOMEM;
11330                 goto err_clr_master;
11331         }
11332
11333         ret = hclge_dev_mem_map(hdev);
11334         if (ret)
11335                 goto err_unmap_io_base;
11336
11337         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11338
11339         return 0;
11340
11341 err_unmap_io_base:
11342         pcim_iounmap(pdev, hdev->hw.io_base);
11343 err_clr_master:
11344         pci_clear_master(pdev);
11345         pci_release_regions(pdev);
11346 err_disable_device:
11347         pci_disable_device(pdev);
11348
11349         return ret;
11350 }
11351
11352 static void hclge_pci_uninit(struct hclge_dev *hdev)
11353 {
11354         struct pci_dev *pdev = hdev->pdev;
11355
11356         if (hdev->hw.mem_base)
11357                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11358
11359         pcim_iounmap(pdev, hdev->hw.io_base);
11360         pci_free_irq_vectors(pdev);
11361         pci_clear_master(pdev);
11362         pci_release_mem_regions(pdev);
11363         pci_disable_device(pdev);
11364 }
11365
11366 static void hclge_state_init(struct hclge_dev *hdev)
11367 {
11368         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11369         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11370         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11371         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11372         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11373         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11374         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11375 }
11376
11377 static void hclge_state_uninit(struct hclge_dev *hdev)
11378 {
11379         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11380         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11381
11382         if (hdev->reset_timer.function)
11383                 del_timer_sync(&hdev->reset_timer);
11384         if (hdev->service_task.work.func)
11385                 cancel_delayed_work_sync(&hdev->service_task);
11386 }
11387
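/* Prepare the device for a reset of the given type. If the prepare step fails
 * or another reset is still pending, release the reset semaphore and retry,
 * up to HCLGE_RESET_RETRY_CNT times or for as long as a reset stays pending.
 */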
11388 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11389                                         enum hnae3_reset_type rst_type)
11390 {
11391 #define HCLGE_RESET_RETRY_WAIT_MS       500
11392 #define HCLGE_RESET_RETRY_CNT   5
11393
11394         struct hclge_dev *hdev = ae_dev->priv;
11395         int retry_cnt = 0;
11396         int ret;
11397
11398 retry:
11399         down(&hdev->reset_sem);
11400         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11401         hdev->reset_type = rst_type;
11402         ret = hclge_reset_prepare(hdev);
11403         if (ret || hdev->reset_pending) {
11404                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11405                         ret);
11406                 if (hdev->reset_pending ||
11407                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11408                         dev_err(&hdev->pdev->dev,
11409                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11410                                 hdev->reset_pending, retry_cnt);
11411                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11412                         up(&hdev->reset_sem);
11413                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11414                         goto retry;
11415                 }
11416         }
11417
11418         /* disable misc vector before reset done */
11419         hclge_enable_vector(&hdev->misc_vector, false);
11420         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11421
11422         if (hdev->reset_type == HNAE3_FLR_RESET)
11423                 hdev->rst_stats.flr_rst_cnt++;
11424 }
11425
11426 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11427 {
11428         struct hclge_dev *hdev = ae_dev->priv;
11429         int ret;
11430
11431         hclge_enable_vector(&hdev->misc_vector, true);
11432
11433         ret = hclge_reset_rebuild(hdev);
11434         if (ret)
11435                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11436
11437         hdev->reset_type = HNAE3_NONE_RESET;
11438         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11439         up(&hdev->reset_sem);
11440 }
11441
11442 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11443 {
11444         u16 i;
11445
11446         for (i = 0; i < hdev->num_alloc_vport; i++) {
11447                 struct hclge_vport *vport = &hdev->vport[i];
11448                 int ret;
11449
11450                 /* Send cmd to clear VF's FUNC_RST_ING */
11451                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11452                 if (ret)
11453                         dev_warn(&hdev->pdev->dev,
11454                                  "clear vf(%u) rst failed %d!\n",
11455                                  vport->vport_id, ret);
11456         }
11457 }
11458
11459 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11460 {
11461         struct hclge_desc desc;
11462         int ret;
11463
11464         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11465
11466         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11467         /* This new command is only supported by new firmware and will
11468          * fail with older firmware. The error value -EOPNOTSUPP can only
11469          * be returned when older firmware runs this command, so to keep
11470          * the code backward compatible we override this value and return
11471          * success.
11472          */
11473         if (ret && ret != -EOPNOTSUPP) {
11474                 dev_err(&hdev->pdev->dev,
11475                         "failed to clear hw resource, ret = %d\n", ret);
11476                 return ret;
11477         }
11478         return 0;
11479 }
11480
11481 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11482 {
11483         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11484                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11485 }
11486
11487 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11488 {
11489         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11490                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11491 }
11492
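/* PF initialization entry point: bring up PCI, devlink and the command queue,
 * query capabilities and device specs, set up MSI and the misc IRQ, allocate
 * TQPs and vports, optionally register MDIO, then initialize MAC, VLAN, TM,
 * RSS, the manager table, flow director and PTP. Failures unwind through the
 * matching error labels.
 */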
11493 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11494 {
11495         struct pci_dev *pdev = ae_dev->pdev;
11496         struct hclge_dev *hdev;
11497         int ret;
11498
11499         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11500         if (!hdev)
11501                 return -ENOMEM;
11502
11503         hdev->pdev = pdev;
11504         hdev->ae_dev = ae_dev;
11505         hdev->reset_type = HNAE3_NONE_RESET;
11506         hdev->reset_level = HNAE3_FUNC_RESET;
11507         ae_dev->priv = hdev;
11508
11509         /* HW supports 2-layer VLAN */
11510         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11511
11512         mutex_init(&hdev->vport_lock);
11513         spin_lock_init(&hdev->fd_rule_lock);
11514         sema_init(&hdev->reset_sem, 1);
11515
11516         ret = hclge_pci_init(hdev);
11517         if (ret)
11518                 goto out;
11519
11520         ret = hclge_devlink_init(hdev);
11521         if (ret)
11522                 goto err_pci_uninit;
11523
11524         /* Initialize the firmware command queue */
11525         ret = hclge_cmd_queue_init(hdev);
11526         if (ret)
11527                 goto err_devlink_uninit;
11528
11529         /* Initialize the firmware command */
11530         ret = hclge_cmd_init(hdev);
11531         if (ret)
11532                 goto err_cmd_uninit;
11533
11534         ret = hclge_clear_hw_resource(hdev);
11535         if (ret)
11536                 goto err_cmd_uninit;
11537
11538         ret = hclge_get_cap(hdev);
11539         if (ret)
11540                 goto err_cmd_uninit;
11541
11542         ret = hclge_query_dev_specs(hdev);
11543         if (ret) {
11544                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11545                         ret);
11546                 goto err_cmd_uninit;
11547         }
11548
11549         ret = hclge_configure(hdev);
11550         if (ret) {
11551                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11552                 goto err_cmd_uninit;
11553         }
11554
11555         ret = hclge_init_msi(hdev);
11556         if (ret) {
11557                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11558                 goto err_cmd_uninit;
11559         }
11560
11561         ret = hclge_misc_irq_init(hdev);
11562         if (ret)
11563                 goto err_msi_uninit;
11564
11565         ret = hclge_alloc_tqps(hdev);
11566         if (ret) {
11567                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11568                 goto err_msi_irq_uninit;
11569         }
11570
11571         ret = hclge_alloc_vport(hdev);
11572         if (ret)
11573                 goto err_msi_irq_uninit;
11574
11575         ret = hclge_map_tqp(hdev);
11576         if (ret)
11577                 goto err_msi_irq_uninit;
11578
11579         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11580             !hnae3_dev_phy_imp_supported(hdev)) {
11581                 ret = hclge_mac_mdio_config(hdev);
11582                 if (ret)
11583                         goto err_msi_irq_uninit;
11584         }
11585
11586         ret = hclge_init_umv_space(hdev);
11587         if (ret)
11588                 goto err_mdiobus_unreg;
11589
11590         ret = hclge_mac_init(hdev);
11591         if (ret) {
11592                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11593                 goto err_mdiobus_unreg;
11594         }
11595
11596         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11597         if (ret) {
11598                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11599                 goto err_mdiobus_unreg;
11600         }
11601
11602         ret = hclge_config_gro(hdev);
11603         if (ret)
11604                 goto err_mdiobus_unreg;
11605
11606         ret = hclge_init_vlan_config(hdev);
11607         if (ret) {
11608                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11609                 goto err_mdiobus_unreg;
11610         }
11611
11612         ret = hclge_tm_schd_init(hdev);
11613         if (ret) {
11614                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11615                 goto err_mdiobus_unreg;
11616         }
11617
11618         ret = hclge_rss_init_cfg(hdev);
11619         if (ret) {
11620                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11621                 goto err_mdiobus_unreg;
11622         }
11623
11624         ret = hclge_rss_init_hw(hdev);
11625         if (ret) {
11626                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11627                 goto err_mdiobus_unreg;
11628         }
11629
11630         ret = init_mgr_tbl(hdev);
11631         if (ret) {
11632                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11633                 goto err_mdiobus_unreg;
11634         }
11635
11636         ret = hclge_init_fd_config(hdev);
11637         if (ret) {
11638                 dev_err(&pdev->dev,
11639                         "fd table init fail, ret=%d\n", ret);
11640                 goto err_mdiobus_unreg;
11641         }
11642
11643         ret = hclge_ptp_init(hdev);
11644         if (ret)
11645                 goto err_mdiobus_unreg;
11646
11647         INIT_KFIFO(hdev->mac_tnl_log);
11648
11649         hclge_dcb_ops_set(hdev);
11650
11651         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11652         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11653
11654         /* Set up affinity after the service timer setup because add_timer_on
11655          * is called in the affinity notify callback.
11656          */
11657         hclge_misc_affinity_setup(hdev);
11658
11659         hclge_clear_all_event_cause(hdev);
11660         hclge_clear_resetting_state(hdev);
11661
11662         /* Log and clear the hw errors that have already occurred */
11663         if (hnae3_dev_ras_imp_supported(hdev))
11664                 hclge_handle_occurred_error(hdev);
11665         else
11666                 hclge_handle_all_hns_hw_errors(ae_dev);
11667
11668         /* request a delayed reset for error recovery because an immediate global
11669          * reset on a PF would affect the pending initialization of other PFs
11670          */
11671         if (ae_dev->hw_err_reset_req) {
11672                 enum hnae3_reset_type reset_level;
11673
11674                 reset_level = hclge_get_reset_level(ae_dev,
11675                                                     &ae_dev->hw_err_reset_req);
11676                 hclge_set_def_reset_request(ae_dev, reset_level);
11677                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11678         }
11679
11680         hclge_init_rxd_adv_layout(hdev);
11681
11682         /* Enable MISC vector(vector0) */
11683         hclge_enable_vector(&hdev->misc_vector, true);
11684
11685         hclge_state_init(hdev);
11686         hdev->last_reset_time = jiffies;
11687
11688         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11689                  HCLGE_DRIVER_NAME);
11690
11691         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11692
11693         return 0;
11694
11695 err_mdiobus_unreg:
11696         if (hdev->hw.mac.phydev)
11697                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11698 err_msi_irq_uninit:
11699         hclge_misc_irq_uninit(hdev);
11700 err_msi_uninit:
11701         pci_free_irq_vectors(pdev);
11702 err_cmd_uninit:
11703         hclge_cmd_uninit(hdev);
11704 err_devlink_uninit:
11705         hclge_devlink_uninit(hdev);
11706 err_pci_uninit:
11707         pcim_iounmap(pdev, hdev->hw.io_base);
11708         pci_clear_master(pdev);
11709         pci_release_regions(pdev);
11710         pci_disable_device(pdev);
11711 out:
11712         mutex_destroy(&hdev->vport_lock);
11713         return ret;
11714 }
11715
11716 static void hclge_stats_clear(struct hclge_dev *hdev)
11717 {
11718         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11719 }
11720
11721 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11722 {
11723         return hclge_config_switch_param(hdev, vf, enable,
11724                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11725 }
11726
11727 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11728 {
11729         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11730                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11731                                           enable, vf);
11732 }
11733
11734 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11735 {
11736         int ret;
11737
11738         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11739         if (ret) {
11740                 dev_err(&hdev->pdev->dev,
11741                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11742                         vf, enable ? "on" : "off", ret);
11743                 return ret;
11744         }
11745
11746         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11747         if (ret)
11748                 dev_err(&hdev->pdev->dev,
11749                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11750                         vf, enable ? "on" : "off", ret);
11751
11752         return ret;
11753 }
11754
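/* Enable or disable spoof checking for a VF. This requires device version V2
 * or later; if the VF's VLAN or MAC table is already full a warning is logged,
 * since the VF's own packets may then fail to send with spoof check enabled.
 */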
11755 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11756                                  bool enable)
11757 {
11758         struct hclge_vport *vport = hclge_get_vport(handle);
11759         struct hclge_dev *hdev = vport->back;
11760         u32 new_spoofchk = enable ? 1 : 0;
11761         int ret;
11762
11763         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11764                 return -EOPNOTSUPP;
11765
11766         vport = hclge_get_vf_vport(hdev, vf);
11767         if (!vport)
11768                 return -EINVAL;
11769
11770         if (vport->vf_info.spoofchk == new_spoofchk)
11771                 return 0;
11772
11773         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11774                 dev_warn(&hdev->pdev->dev,
11775                          "vf %d vlan table is full, enabling spoof check may cause its packet sending to fail\n",
11776                          vf);
11777         else if (enable && hclge_is_umv_space_full(vport, true))
11778                 dev_warn(&hdev->pdev->dev,
11779                          "vf %d mac table is full, enabling spoof check may cause its packet sending to fail\n",
11780                          vf);
11781
11782         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11783         if (ret)
11784                 return ret;
11785
11786         vport->vf_info.spoofchk = new_spoofchk;
11787         return 0;
11788 }
11789
11790 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11791 {
11792         struct hclge_vport *vport = hdev->vport;
11793         int ret;
11794         int i;
11795
11796         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11797                 return 0;
11798
11799         /* resume the vf spoof check state after reset */
11800         for (i = 0; i < hdev->num_alloc_vport; i++) {
11801                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11802                                                vport->vf_info.spoofchk);
11803                 if (ret)
11804                         return ret;
11805
11806                 vport++;
11807         }
11808
11809         return 0;
11810 }
11811
11812 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11813 {
11814         struct hclge_vport *vport = hclge_get_vport(handle);
11815         struct hclge_dev *hdev = vport->back;
11816         u32 new_trusted = enable ? 1 : 0;
11817
11818         vport = hclge_get_vf_vport(hdev, vf);
11819         if (!vport)
11820                 return -EINVAL;
11821
11822         if (vport->vf_info.trusted == new_trusted)
11823                 return 0;
11824
11825         vport->vf_info.trusted = new_trusted;
11826         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11827         hclge_task_schedule(hdev, 0);
11828
11829         return 0;
11830 }
11831
11832 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11833 {
11834         int ret;
11835         int vf;
11836
11837         /* reset vf rate to default value */
11838         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11839                 struct hclge_vport *vport = &hdev->vport[vf];
11840
11841                 vport->vf_info.max_tx_rate = 0;
11842                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11843                 if (ret)
11844                         dev_err(&hdev->pdev->dev,
11845                                 "vf%d failed to reset to default, ret=%d\n",
11846                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11847         }
11848 }
11849
11850 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11851                                      int min_tx_rate, int max_tx_rate)
11852 {
11853         if (min_tx_rate != 0 ||
11854             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11855                 dev_err(&hdev->pdev->dev,
11856                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11857                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11858                 return -EINVAL;
11859         }
11860
11861         return 0;
11862 }
11863
11864 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11865                              int min_tx_rate, int max_tx_rate, bool force)
11866 {
11867         struct hclge_vport *vport = hclge_get_vport(handle);
11868         struct hclge_dev *hdev = vport->back;
11869         int ret;
11870
11871         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11872         if (ret)
11873                 return ret;
11874
11875         vport = hclge_get_vf_vport(hdev, vf);
11876         if (!vport)
11877                 return -EINVAL;
11878
11879         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11880                 return 0;
11881
11882         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11883         if (ret)
11884                 return ret;
11885
11886         vport->vf_info.max_tx_rate = max_tx_rate;
11887
11888         return 0;
11889 }
11890
11891 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11892 {
11893         struct hnae3_handle *handle = &hdev->vport->nic;
11894         struct hclge_vport *vport;
11895         int ret;
11896         int vf;
11897
11898         /* resume the vf max_tx_rate after reset */
11899         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11900                 vport = hclge_get_vf_vport(hdev, vf);
11901                 if (!vport)
11902                         return -EINVAL;
11903
11904                 /* zero means max rate; after reset the firmware has already set
11905                  * it to max rate, so just continue.
11906                  */
11907                 if (!vport->vf_info.max_tx_rate)
11908                         continue;
11909
11910                 ret = hclge_set_vf_rate(handle, vf, 0,
11911                                         vport->vf_info.max_tx_rate, true);
11912                 if (ret) {
11913                         dev_err(&hdev->pdev->dev,
11914                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11915                                 vf, vport->vf_info.max_tx_rate, ret);
11916                         return ret;
11917                 }
11918         }
11919
11920         return 0;
11921 }
11922
11923 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11924 {
11925         struct hclge_vport *vport = hdev->vport;
11926         int i;
11927
11928         for (i = 0; i < hdev->num_alloc_vport; i++) {
11929                 hclge_vport_stop(vport);
11930                 vport++;
11931         }
11932 }
11933
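/* Re-initialize the hardware after a reset. The VLAN, VF-VLAN-full and UMV
 * state is only cleared for IMP and global resets; the remaining steps re-run
 * largely the same sequence as hclge_init_ae_dev() on the existing software
 * state.
 */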
11934 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11935 {
11936         struct hclge_dev *hdev = ae_dev->priv;
11937         struct pci_dev *pdev = ae_dev->pdev;
11938         int ret;
11939
11940         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11941
11942         hclge_stats_clear(hdev);
11943         /* NOTE: a pf reset does not need to clear or restore the pf and vf table
11944          * entries, so the tables in memory should not be cleaned here.
11945          */
11946         if (hdev->reset_type == HNAE3_IMP_RESET ||
11947             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11948                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11949                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11950                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11951                 hclge_reset_umv_space(hdev);
11952         }
11953
11954         ret = hclge_cmd_init(hdev);
11955         if (ret) {
11956                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11957                 return ret;
11958         }
11959
11960         ret = hclge_map_tqp(hdev);
11961         if (ret) {
11962                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11963                 return ret;
11964         }
11965
11966         ret = hclge_mac_init(hdev);
11967         if (ret) {
11968                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11969                 return ret;
11970         }
11971
11972         ret = hclge_tp_port_init(hdev);
11973         if (ret) {
11974                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11975                         ret);
11976                 return ret;
11977         }
11978
11979         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11980         if (ret) {
11981                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11982                 return ret;
11983         }
11984
11985         ret = hclge_config_gro(hdev);
11986         if (ret)
11987                 return ret;
11988
11989         ret = hclge_init_vlan_config(hdev);
11990         if (ret) {
11991                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11992                 return ret;
11993         }
11994
11995         ret = hclge_tm_init_hw(hdev, true);
11996         if (ret) {
11997                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11998                 return ret;
11999         }
12000
12001         ret = hclge_rss_init_hw(hdev);
12002         if (ret) {
12003                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12004                 return ret;
12005         }
12006
12007         ret = init_mgr_tbl(hdev);
12008         if (ret) {
12009                 dev_err(&pdev->dev,
12010                         "failed to reinit manager table, ret = %d\n", ret);
12011                 return ret;
12012         }
12013
12014         ret = hclge_init_fd_config(hdev);
12015         if (ret) {
12016                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12017                 return ret;
12018         }
12019
12020         ret = hclge_ptp_init(hdev);
12021         if (ret)
12022                 return ret;
12023
12024         /* Log and clear the hw errors that have already occurred */
12025         if (hnae3_dev_ras_imp_supported(hdev))
12026                 hclge_handle_occurred_error(hdev);
12027         else
12028                 hclge_handle_all_hns_hw_errors(ae_dev);
12029
12030         /* Re-enable the hw error interrupts because
12031          * the interrupts are disabled during a global reset.
12032          */
12033         ret = hclge_config_nic_hw_error(hdev, true);
12034         if (ret) {
12035                 dev_err(&pdev->dev,
12036                         "fail(%d) to re-enable NIC hw error interrupts\n",
12037                         ret);
12038                 return ret;
12039         }
12040
12041         if (hdev->roce_client) {
12042                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12043                 if (ret) {
12044                         dev_err(&pdev->dev,
12045                                 "fail(%d) to re-enable roce ras interrupts\n",
12046                                 ret);
12047                         return ret;
12048                 }
12049         }
12050
12051         hclge_reset_vport_state(hdev);
12052         ret = hclge_reset_vport_spoofchk(hdev);
12053         if (ret)
12054                 return ret;
12055
12056         ret = hclge_resume_vf_rate(hdev);
12057         if (ret)
12058                 return ret;
12059
12060         hclge_init_rxd_adv_layout(hdev);
12061
12062         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12063                  HCLGE_DRIVER_NAME);
12064
12065         return 0;
12066 }
12067
12068 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12069 {
12070         struct hclge_dev *hdev = ae_dev->priv;
12071         struct hclge_mac *mac = &hdev->hw.mac;
12072
12073         hclge_reset_vf_rate(hdev);
12074         hclge_clear_vf_vlan(hdev);
12075         hclge_misc_affinity_teardown(hdev);
12076         hclge_state_uninit(hdev);
12077         hclge_ptp_uninit(hdev);
12078         hclge_uninit_rxd_adv_layout(hdev);
12079         hclge_uninit_mac_table(hdev);
12080         hclge_del_all_fd_entries(hdev);
12081
12082         if (mac->phydev)
12083                 mdiobus_unregister(mac->mdio_bus);
12084
12085         /* Disable MISC vector(vector0) */
12086         hclge_enable_vector(&hdev->misc_vector, false);
12087         synchronize_irq(hdev->misc_vector.vector_irq);
12088
12089         /* Disable all hw interrupts */
12090         hclge_config_mac_tnl_int(hdev, false);
12091         hclge_config_nic_hw_error(hdev, false);
12092         hclge_config_rocee_ras_interrupt(hdev, false);
12093
12094         hclge_cmd_uninit(hdev);
12095         hclge_misc_irq_uninit(hdev);
12096         hclge_devlink_uninit(hdev);
12097         hclge_pci_uninit(hdev);
12098         mutex_destroy(&hdev->vport_lock);
12099         hclge_uninit_vport_vlan_table(hdev);
12100         ae_dev->priv = NULL;
12101 }
12102
12103 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12104 {
12105         struct hclge_vport *vport = hclge_get_vport(handle);
12106         struct hclge_dev *hdev = vport->back;
12107
12108         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12109 }
12110
12111 static void hclge_get_channels(struct hnae3_handle *handle,
12112                                struct ethtool_channels *ch)
12113 {
12114         ch->max_combined = hclge_get_max_channels(handle);
12115         ch->other_count = 1;
12116         ch->max_other = 1;
12117         ch->combined_count = handle->kinfo.rss_size;
12118 }
12119
12120 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12121                                         u16 *alloc_tqps, u16 *max_rss_size)
12122 {
12123         struct hclge_vport *vport = hclge_get_vport(handle);
12124         struct hclge_dev *hdev = vport->back;
12125
12126         *alloc_tqps = vport->alloc_tqps;
12127         *max_rss_size = hdev->pf_rss_size_max;
12128 }
12129
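/* Change the number of channels: store the requested RSS size, update the
 * vport TQP mapping, reprogram the RSS TC mode for the new rss_size, and
 * rebuild the RSS indirection table unless the user has configured it
 * (rxfh_configured).
 */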
12130 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12131                               bool rxfh_configured)
12132 {
12133         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12134         struct hclge_vport *vport = hclge_get_vport(handle);
12135         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12136         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12137         struct hclge_dev *hdev = vport->back;
12138         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12139         u16 cur_rss_size = kinfo->rss_size;
12140         u16 cur_tqps = kinfo->num_tqps;
12141         u16 tc_valid[HCLGE_MAX_TC_NUM];
12142         u16 roundup_size;
12143         u32 *rss_indir;
12144         unsigned int i;
12145         int ret;
12146
12147         kinfo->req_rss_size = new_tqps_num;
12148
12149         ret = hclge_tm_vport_map_update(hdev);
12150         if (ret) {
12151                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12152                 return ret;
12153         }
12154
12155         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12156         roundup_size = ilog2(roundup_size);
12157         /* Set the RSS TC mode according to the new RSS size */
12158         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12159                 tc_valid[i] = 0;
12160
12161                 if (!(hdev->hw_tc_map & BIT(i)))
12162                         continue;
12163
12164                 tc_valid[i] = 1;
12165                 tc_size[i] = roundup_size;
12166                 tc_offset[i] = kinfo->rss_size * i;
12167         }
12168         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12169         if (ret)
12170                 return ret;
12171
12172         /* RSS indirection table has been configured by user */
12173         if (rxfh_configured)
12174                 goto out;
12175
12176         /* Reinitialize the RSS indirection table according to the new RSS size */
12177         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12178                             GFP_KERNEL);
12179         if (!rss_indir)
12180                 return -ENOMEM;
12181
12182         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12183                 rss_indir[i] = i % kinfo->rss_size;
12184
12185         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12186         if (ret)
12187                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12188                         ret);
12189
12190         kfree(rss_indir);
12191
12192 out:
12193         if (!ret)
12194                 dev_info(&hdev->pdev->dev,
12195                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12196                          cur_rss_size, kinfo->rss_size,
12197                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12198
12199         return ret;
12200 }
12201
12202 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12203                               u32 *regs_num_64_bit)
12204 {
12205         struct hclge_desc desc;
12206         u32 total_num;
12207         int ret;
12208
12209         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12210         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12211         if (ret) {
12212                 dev_err(&hdev->pdev->dev,
12213                         "Query register number cmd failed, ret = %d.\n", ret);
12214                 return ret;
12215         }
12216
12217         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12218         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12219
12220         total_num = *regs_num_32_bit + *regs_num_64_bit;
12221         if (!total_num)
12222                 return -EINVAL;
12223
12224         return 0;
12225 }
12226
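/* Read regs_num 32-bit registers through the command queue. The first
 * descriptor carries HCLGE_32_BIT_DESC_NODATA_LEN non-data words, so it holds
 * fewer register values than the descriptors that follow it.
 */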
12227 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12228                                  void *data)
12229 {
12230 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12231 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12232
12233         struct hclge_desc *desc;
12234         u32 *reg_val = data;
12235         __le32 *desc_data;
12236         int nodata_num;
12237         int cmd_num;
12238         int i, k, n;
12239         int ret;
12240
12241         if (regs_num == 0)
12242                 return 0;
12243
12244         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12245         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12246                                HCLGE_32_BIT_REG_RTN_DATANUM);
12247         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12248         if (!desc)
12249                 return -ENOMEM;
12250
12251         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12252         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12253         if (ret) {
12254                 dev_err(&hdev->pdev->dev,
12255                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12256                 kfree(desc);
12257                 return ret;
12258         }
12259
12260         for (i = 0; i < cmd_num; i++) {
12261                 if (i == 0) {
12262                         desc_data = (__le32 *)(&desc[i].data[0]);
12263                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12264                 } else {
12265                         desc_data = (__le32 *)(&desc[i]);
12266                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12267                 }
12268                 for (k = 0; k < n; k++) {
12269                         *reg_val++ = le32_to_cpu(*desc_data++);
12270
12271                         regs_num--;
12272                         if (!regs_num)
12273                                 break;
12274                 }
12275         }
12276
12277         kfree(desc);
12278         return 0;
12279 }
12280
12281 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12282                                  void *data)
12283 {
12284 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12285 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12286
12287         struct hclge_desc *desc;
12288         u64 *reg_val = data;
12289         __le64 *desc_data;
12290         int nodata_len;
12291         int cmd_num;
12292         int i, k, n;
12293         int ret;
12294
12295         if (regs_num == 0)
12296                 return 0;
12297
12298         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12299         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12300                                HCLGE_64_BIT_REG_RTN_DATANUM);
12301         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12302         if (!desc)
12303                 return -ENOMEM;
12304
12305         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12306         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12307         if (ret) {
12308                 dev_err(&hdev->pdev->dev,
12309                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12310                 kfree(desc);
12311                 return ret;
12312         }
12313
12314         for (i = 0; i < cmd_num; i++) {
12315                 if (i == 0) {
12316                         desc_data = (__le64 *)(&desc[i].data[0]);
12317                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12318                 } else {
12319                         desc_data = (__le64 *)(&desc[i]);
12320                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12321                 }
12322                 for (k = 0; k < n; k++) {
12323                         *reg_val++ = le64_to_cpu(*desc_data++);
12324
12325                         regs_num--;
12326                         if (!regs_num)
12327                                 break;
12328                 }
12329         }
12330
12331         kfree(desc);
12332         return 0;
12333 }
12334
12335 #define MAX_SEPARATE_NUM        4
12336 #define SEPARATOR_VALUE         0xFDFCFBFA
12337 #define REG_NUM_PER_LINE        4
12338 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12339 #define REG_SEPARATOR_LINE      1
12340 #define REG_NUM_REMAIN_MASK     3
12341
12342 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12343 {
12344         int i;
12345
12346         /* initialize all command BDs except the last one */
12347         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12348                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12349                                            true);
12350                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12351         }
12352
12353         /* initialize the last command BD */
12354         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12355
12356         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12357 }
12358
12359 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12360                                     int *bd_num_list,
12361                                     u32 type_num)
12362 {
12363         u32 entries_per_desc, desc_index, index, offset, i;
12364         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12365         int ret;
12366
12367         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12368         if (ret) {
12369                 dev_err(&hdev->pdev->dev,
12370                         "Get dfx bd num fail, status is %d.\n", ret);
12371                 return ret;
12372         }
12373
12374         entries_per_desc = ARRAY_SIZE(desc[0].data);
12375         for (i = 0; i < type_num; i++) {
12376                 offset = hclge_dfx_bd_offset_list[i];
12377                 index = offset % entries_per_desc;
12378                 desc_index = offset / entries_per_desc;
12379                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12380         }
12381
12382         return ret;
12383 }
12384
12385 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12386                                   struct hclge_desc *desc_src, int bd_num,
12387                                   enum hclge_opcode_type cmd)
12388 {
12389         struct hclge_desc *desc = desc_src;
12390         int i, ret;
12391
12392         hclge_cmd_setup_basic_desc(desc, cmd, true);
12393         for (i = 0; i < bd_num - 1; i++) {
12394                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12395                 desc++;
12396                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12397         }
12398
12399         desc = desc_src;
12400         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12401         if (ret)
12402                 dev_err(&hdev->pdev->dev,
12403                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12404                         cmd, ret);
12405
12406         return ret;
12407 }
12408
12409 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12410                                     void *data)
12411 {
12412         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12413         struct hclge_desc *desc = desc_src;
12414         u32 *reg = data;
12415
12416         entries_per_desc = ARRAY_SIZE(desc->data);
12417         reg_num = entries_per_desc * bd_num;
12418         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12419         for (i = 0; i < reg_num; i++) {
12420                 index = i % entries_per_desc;
12421                 desc_index = i / entries_per_desc;
12422                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12423         }
12424         for (i = 0; i < separator_num; i++)
12425                 *reg++ = SEPARATOR_VALUE;
12426
12427         return reg_num + separator_num;
12428 }
12429
12430 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12431 {
12432         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12433         int data_len_per_desc, bd_num, i;
12434         int *bd_num_list;
12435         u32 data_len;
12436         int ret;
12437
12438         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12439         if (!bd_num_list)
12440                 return -ENOMEM;
12441
12442         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12443         if (ret) {
12444                 dev_err(&hdev->pdev->dev,
12445                         "Get dfx reg bd num fail, status is %d.\n", ret);
12446                 goto out;
12447         }
12448
12449         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12450         *len = 0;
12451         for (i = 0; i < dfx_reg_type_num; i++) {
12452                 bd_num = bd_num_list[i];
12453                 data_len = data_len_per_desc * bd_num;
12454                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12455         }
12456
12457 out:
12458         kfree(bd_num_list);
12459         return ret;
12460 }
12461
12462 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12463 {
12464         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12465         int bd_num, bd_num_max, buf_len, i;
12466         struct hclge_desc *desc_src;
12467         int *bd_num_list;
12468         u32 *reg = data;
12469         int ret;
12470
12471         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12472         if (!bd_num_list)
12473                 return -ENOMEM;
12474
12475         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12476         if (ret) {
12477                 dev_err(&hdev->pdev->dev,
12478                         "Get dfx reg bd num fail, status is %d.\n", ret);
12479                 goto out;
12480         }
12481
12482         bd_num_max = bd_num_list[0];
12483         for (i = 1; i < dfx_reg_type_num; i++)
12484                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12485
12486         buf_len = sizeof(*desc_src) * bd_num_max;
12487         desc_src = kzalloc(buf_len, GFP_KERNEL);
12488         if (!desc_src) {
12489                 ret = -ENOMEM;
12490                 goto out;
12491         }
12492
12493         for (i = 0; i < dfx_reg_type_num; i++) {
12494                 bd_num = bd_num_list[i];
12495                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12496                                              hclge_dfx_reg_opcode_list[i]);
12497                 if (ret) {
12498                         dev_err(&hdev->pdev->dev,
12499                                 "Get dfx reg fail, status is %d.\n", ret);
12500                         break;
12501                 }
12502
12503                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12504         }
12505
12506         kfree(desc_src);
12507 out:
12508         kfree(bd_num_list);
12509         return ret;
12510 }
12511
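/* Copy the directly readable PF registers into the dump buffer in a fixed
 * order: cmdq, common, per-ring and per-vector interrupt registers. Each group
 * is padded with SEPARATOR_VALUE words, and the total number of u32 words
 * written is returned.
 */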
12512 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12513                               struct hnae3_knic_private_info *kinfo)
12514 {
12515 #define HCLGE_RING_REG_OFFSET           0x200
12516 #define HCLGE_RING_INT_REG_OFFSET       0x4
12517
12518         int i, j, reg_num, separator_num;
12519         int data_num_sum;
12520         u32 *reg = data;
12521
12522         /* fetch per-PF register values from the PF PCIe register space */
12523         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12524         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12525         for (i = 0; i < reg_num; i++)
12526                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12527         for (i = 0; i < separator_num; i++)
12528                 *reg++ = SEPARATOR_VALUE;
12529         data_num_sum = reg_num + separator_num;
12530
12531         reg_num = ARRAY_SIZE(common_reg_addr_list);
12532         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12533         for (i = 0; i < reg_num; i++)
12534                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12535         for (i = 0; i < separator_num; i++)
12536                 *reg++ = SEPARATOR_VALUE;
12537         data_num_sum += reg_num + separator_num;
12538
12539         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12540         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12541         for (j = 0; j < kinfo->num_tqps; j++) {
12542                 for (i = 0; i < reg_num; i++)
12543                         *reg++ = hclge_read_dev(&hdev->hw,
12544                                                 ring_reg_addr_list[i] +
12545                                                 HCLGE_RING_REG_OFFSET * j);
12546                 for (i = 0; i < separator_num; i++)
12547                         *reg++ = SEPARATOR_VALUE;
12548         }
12549         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12550
12551         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12552         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12553         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12554                 for (i = 0; i < reg_num; i++)
12555                         *reg++ = hclge_read_dev(&hdev->hw,
12556                                                 tqp_intr_reg_addr_list[i] +
12557                                                 HCLGE_RING_INT_REG_OFFSET * j);
12558                 for (i = 0; i < separator_num; i++)
12559                         *reg++ = SEPARATOR_VALUE;
12560         }
12561         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12562
12563         return data_num_sum;
12564 }
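
/* Illustrative sketch, not part of the upstream driver: the padding used in
 * hclge_fetch_pf_reg() assumes REG_NUM_REMAIN_MASK == MAX_SEPARATE_NUM - 1,
 * so every register group is followed by one to MAX_SEPARATE_NUM
 * SEPARATOR_VALUE words and its total length in u32 words comes out as a
 * multiple of MAX_SEPARATE_NUM.
 */
static u32 __maybe_unused hclge_example_padded_group_len(u32 reg_num)
{
        /* hypothetical helper, mirrors the arithmetic used above */
        return reg_num + (MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK));
}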
12565
12566 static int hclge_get_regs_len(struct hnae3_handle *handle)
12567 {
12568         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12569         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12570         struct hclge_vport *vport = hclge_get_vport(handle);
12571         struct hclge_dev *hdev = vport->back;
12572         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12573         int regs_lines_32_bit, regs_lines_64_bit;
12574         int ret;
12575
12576         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12577         if (ret) {
12578                 dev_err(&hdev->pdev->dev,
12579                         "Get register number failed, ret = %d.\n", ret);
12580                 return ret;
12581         }
12582
12583         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12584         if (ret) {
12585                 dev_err(&hdev->pdev->dev,
12586                         "Get dfx reg len failed, ret = %d.\n", ret);
12587                 return ret;
12588         }
12589
12590         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12591                 REG_SEPARATOR_LINE;
12592         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12593                 REG_SEPARATOR_LINE;
12594         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12595                 REG_SEPARATOR_LINE;
12596         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12597                 REG_SEPARATOR_LINE;
12598         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12599                 REG_SEPARATOR_LINE;
12600         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12601                 REG_SEPARATOR_LINE;
12602
12603         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12604                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12605                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12606 }
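
/* Note (editorial, not upstream): .get_regs_len is used by the ethtool core
 * to size the buffer later handed to .get_regs, so the line-based count above
 * must cover every word that hclge_get_regs() and hclge_fetch_pf_reg() emit,
 * including the SEPARATOR_VALUE padding.  A raw dump can be pulled from
 * userspace with, for example:
 *
 *      ethtool -d <ifname> raw on > regs.bin
 */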
12607
12608 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12609                            void *data)
12610 {
12611         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12612         struct hclge_vport *vport = hclge_get_vport(handle);
12613         struct hclge_dev *hdev = vport->back;
12614         u32 regs_num_32_bit, regs_num_64_bit;
12615         int i, reg_num, separator_num, ret;
12616         u32 *reg = data;
12617
12618         *version = hdev->fw_version;
12619
12620         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12621         if (ret) {
12622                 dev_err(&hdev->pdev->dev,
12623                         "Get register number failed, ret = %d.\n", ret);
12624                 return;
12625         }
12626
12627         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12628
12629         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12630         if (ret) {
12631                 dev_err(&hdev->pdev->dev,
12632                         "Get 32 bit register failed, ret = %d.\n", ret);
12633                 return;
12634         }
12635         reg_num = regs_num_32_bit;
12636         reg += reg_num;
12637         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12638         for (i = 0; i < separator_num; i++)
12639                 *reg++ = SEPARATOR_VALUE;
12640
12641         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12642         if (ret) {
12643                 dev_err(&hdev->pdev->dev,
12644                         "Get 64 bit register failed, ret = %d.\n", ret);
12645                 return;
12646         }
12647         reg_num = regs_num_64_bit * 2;
12648         reg += reg_num;
12649         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12650         for (i = 0; i < separator_num; i++)
12651                 *reg++ = SEPARATOR_VALUE;
12652
12653         ret = hclge_get_dfx_reg(hdev, reg);
12654         if (ret)
12655                 dev_err(&hdev->pdev->dev,
12656                         "Get dfx register failed, ret = %d.\n", ret);
12657 }
12658
12659 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12660 {
12661         struct hclge_set_led_state_cmd *req;
12662         struct hclge_desc desc;
12663         int ret;
12664
12665         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12666
12667         req = (struct hclge_set_led_state_cmd *)desc.data;
12668         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12669                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12670
12671         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12672         if (ret)
12673                 dev_err(&hdev->pdev->dev,
12674                         "Send set led state cmd error, ret = %d\n", ret);
12675
12676         return ret;
12677 }
12678
12679 enum hclge_led_status {
12680         HCLGE_LED_OFF,
12681         HCLGE_LED_ON,
12682         HCLGE_LED_NO_CHANGE = 0xFF,
12683 };
12684
12685 static int hclge_set_led_id(struct hnae3_handle *handle,
12686                             enum ethtool_phys_id_state status)
12687 {
12688         struct hclge_vport *vport = hclge_get_vport(handle);
12689         struct hclge_dev *hdev = vport->back;
12690
12691         switch (status) {
12692         case ETHTOOL_ID_ACTIVE:
12693                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12694         case ETHTOOL_ID_INACTIVE:
12695                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12696         default:
12697                 return -EINVAL;
12698         }
12699 }
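
/* Note (editorial, not upstream): this hook is reached through the hns3
 * ethtool .set_phys_id path, which passes ETHTOOL_ID_ACTIVE and
 * ETHTOOL_ID_INACTIVE while the user identifies the port with, for example:
 *
 *      ethtool -p <ifname> 5
 */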
12700
12701 static void hclge_get_link_mode(struct hnae3_handle *handle,
12702                                 unsigned long *supported,
12703                                 unsigned long *advertising)
12704 {
12705         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12706         struct hclge_vport *vport = hclge_get_vport(handle);
12707         struct hclge_dev *hdev = vport->back;
12708         unsigned int idx = 0;
12709
12710         for (; idx < size; idx++) {
12711                 supported[idx] = hdev->hw.mac.supported[idx];
12712                 advertising[idx] = hdev->hw.mac.advertising[idx];
12713         }
12714 }
12715
12716 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12717 {
12718         struct hclge_vport *vport = hclge_get_vport(handle);
12719         struct hclge_dev *hdev = vport->back;
12720         bool gro_en_old = hdev->gro_en;
12721         int ret;
12722
12723         hdev->gro_en = enable;
12724         ret = hclge_config_gro(hdev);
12725         if (ret)
12726                 hdev->gro_en = gro_en_old;
12727
12728         return ret;
12729 }
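
/* Note (editorial, not upstream): hclge_gro_en() uses the driver's usual
 * rollback idiom - the new value is written to hdev first so that
 * hclge_config_gro() picks it up, and the old value is restored if the
 * firmware command fails, keeping software state in sync with hardware.
 */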
12730
12731 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12732 {
12733         struct hclge_vport *vport = &hdev->vport[0];
12734         struct hnae3_handle *handle = &vport->nic;
12735         u8 tmp_flags;
12736         int ret;
12737         u16 i;
12738
12739         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12740                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12741                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12742         }
12743
12744         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12745                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12746                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12747                                              tmp_flags & HNAE3_MPE);
12748                 if (!ret) {
12749                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12750                                   &vport->state);
12751                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12752                                 &vport->state);
12753                 }
12754         }
12755
12756         for (i = 1; i < hdev->num_alloc_vport; i++) {
12757                 bool uc_en = false;
12758                 bool mc_en = false;
12759                 bool bc_en;
12760
12761                 vport = &hdev->vport[i];
12762
12763                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12764                                         &vport->state))
12765                         continue;
12766
12767                 if (vport->vf_info.trusted) {
12768                         uc_en = vport->vf_info.request_uc_en > 0;
12769                         mc_en = vport->vf_info.request_mc_en > 0;
12770                 }
12771                 bc_en = vport->vf_info.request_bc_en > 0;
12772
12773                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12774                                                  mc_en, bc_en);
12775                 if (ret) {
12776                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12777                                 &vport->state);
12778                         return;
12779                 }
12780                 hclge_set_vport_vlan_fltr_change(vport);
12781         }
12782 }
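
/* Note (editorial, not upstream): vport 0 is the PF's own vport and vports
 * 1..num_alloc_vport - 1 belong to VFs.  As the loop above shows, a VF's
 * requested unicast/multicast promiscuous modes are honored only when the
 * VF is marked trusted, while broadcast promiscuity follows the VF's request
 * unconditionally.
 */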
12783
12784 static bool hclge_module_existed(struct hclge_dev *hdev)
12785 {
12786         struct hclge_desc desc;
12787         u32 existed;
12788         int ret;
12789
12790         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12791         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12792         if (ret) {
12793                 dev_err(&hdev->pdev->dev,
12794                         "failed to get SFP exist state, ret = %d\n", ret);
12795                 return false;
12796         }
12797
12798         existed = le32_to_cpu(desc.data[0]);
12799
12800         return existed != 0;
12801 }
12802
12803 /* Each read needs 6 BDs (140 bytes in total).
12804  * Return the number of bytes actually read; 0 means the read failed.
12805  */
12806 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12807                                      u32 len, u8 *data)
12808 {
12809         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12810         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12811         u16 read_len;
12812         u16 copy_len;
12813         int ret;
12814         int i;
12815
12816         /* setup all 6 bds to read module eeprom info. */
12817         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12818                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12819                                            true);
12820
12821                 /* bd0~bd4 need next flag */
12822                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12823                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12824         }
12825
12826         /* set up bd0; this bd carries the offset and read length. */
12827         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12828         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12829         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12830         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12831
12832         ret = hclge_cmd_send(&hdev->hw, desc, i);
12833         if (ret) {
12834                 dev_err(&hdev->pdev->dev,
12835                         "failed to get SFP eeprom info, ret = %d\n", ret);
12836                 return 0;
12837         }
12838
12839         /* copy sfp info from bd0 to out buffer. */
12840         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12841         memcpy(data, sfp_info_bd0->data, copy_len);
12842         read_len = copy_len;
12843
12844         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12845         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12846                 if (read_len >= len)
12847                         return read_len;
12848
12849                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12850                 memcpy(data + read_len, desc[i].data, copy_len);
12851                 read_len += copy_len;
12852         }
12853
12854         return read_len;
12855 }
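
/* Illustrative sketch, not part of the upstream driver: the maximum payload
 * of one HCLGE_OPC_GET_SFP_EEPROM command follows from the BD layout above,
 * with bd0 carrying HCLGE_SFP_INFO_BD0_LEN data bytes and each remaining BD
 * carrying HCLGE_SFP_INFO_BDX_LEN bytes.  Equating this with
 * HCLGE_SFP_INFO_MAX_LEN (the 140 bytes mentioned above) is an assumption,
 * not a definition taken from the header.
 */
static u32 __maybe_unused hclge_example_sfp_cmd_payload(void)
{
        return HCLGE_SFP_INFO_BD0_LEN +
               (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN;
}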
12856
12857 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12858                                    u32 len, u8 *data)
12859 {
12860         struct hclge_vport *vport = hclge_get_vport(handle);
12861         struct hclge_dev *hdev = vport->back;
12862         u32 read_len = 0;
12863         u16 data_len;
12864
12865         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12866                 return -EOPNOTSUPP;
12867
12868         if (!hclge_module_existed(hdev))
12869                 return -ENXIO;
12870
12871         while (read_len < len) {
12872                 data_len = hclge_get_sfp_eeprom_info(hdev,
12873                                                      offset + read_len,
12874                                                      len - read_len,
12875                                                      data + read_len);
12876                 if (!data_len)
12877                         return -EIO;
12878
12879                 read_len += data_len;
12880         }
12881
12882         return 0;
12883 }
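
/* Note (editorial, not upstream): this is the hns3 backend for the ethtool
 * module-EEPROM dump (.get_module_eeprom).  It is only available on fiber
 * ports with a module plugged in, and it reads the EEPROM in firmware-sized
 * chunks until len bytes are copied or a read fails.  For example:
 *
 *      ethtool -m <ifname> offset 0 length 128
 */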
12884
12885 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12886                                          u32 *status_code)
12887 {
12888         struct hclge_vport *vport = hclge_get_vport(handle);
12889         struct hclge_dev *hdev = vport->back;
12890         struct hclge_desc desc;
12891         int ret;
12892
12893         if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12894                 return -EOPNOTSUPP;
12895
12896         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12897         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12898         if (ret) {
12899                 dev_err(&hdev->pdev->dev,
12900                         "failed to query link diagnosis info, ret = %d\n", ret);
12901                 return ret;
12902         }
12903
12904         *status_code = le32_to_cpu(desc.data[0]);
12905         return 0;
12906 }
12907
12908 static const struct hnae3_ae_ops hclge_ops = {
12909         .init_ae_dev = hclge_init_ae_dev,
12910         .uninit_ae_dev = hclge_uninit_ae_dev,
12911         .reset_prepare = hclge_reset_prepare_general,
12912         .reset_done = hclge_reset_done,
12913         .init_client_instance = hclge_init_client_instance,
12914         .uninit_client_instance = hclge_uninit_client_instance,
12915         .map_ring_to_vector = hclge_map_ring_to_vector,
12916         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12917         .get_vector = hclge_get_vector,
12918         .put_vector = hclge_put_vector,
12919         .set_promisc_mode = hclge_set_promisc_mode,
12920         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12921         .set_loopback = hclge_set_loopback,
12922         .start = hclge_ae_start,
12923         .stop = hclge_ae_stop,
12924         .client_start = hclge_client_start,
12925         .client_stop = hclge_client_stop,
12926         .get_status = hclge_get_status,
12927         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12928         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12929         .get_media_type = hclge_get_media_type,
12930         .check_port_speed = hclge_check_port_speed,
12931         .get_fec = hclge_get_fec,
12932         .set_fec = hclge_set_fec,
12933         .get_rss_key_size = hclge_get_rss_key_size,
12934         .get_rss = hclge_get_rss,
12935         .set_rss = hclge_set_rss,
12936         .set_rss_tuple = hclge_set_rss_tuple,
12937         .get_rss_tuple = hclge_get_rss_tuple,
12938         .get_tc_size = hclge_get_tc_size,
12939         .get_mac_addr = hclge_get_mac_addr,
12940         .set_mac_addr = hclge_set_mac_addr,
12941         .do_ioctl = hclge_do_ioctl,
12942         .add_uc_addr = hclge_add_uc_addr,
12943         .rm_uc_addr = hclge_rm_uc_addr,
12944         .add_mc_addr = hclge_add_mc_addr,
12945         .rm_mc_addr = hclge_rm_mc_addr,
12946         .set_autoneg = hclge_set_autoneg,
12947         .get_autoneg = hclge_get_autoneg,
12948         .restart_autoneg = hclge_restart_autoneg,
12949         .halt_autoneg = hclge_halt_autoneg,
12950         .get_pauseparam = hclge_get_pauseparam,
12951         .set_pauseparam = hclge_set_pauseparam,
12952         .set_mtu = hclge_set_mtu,
12953         .reset_queue = hclge_reset_tqp,
12954         .get_stats = hclge_get_stats,
12955         .get_mac_stats = hclge_get_mac_stat,
12956         .update_stats = hclge_update_stats,
12957         .get_strings = hclge_get_strings,
12958         .get_sset_count = hclge_get_sset_count,
12959         .get_fw_version = hclge_get_fw_version,
12960         .get_mdix_mode = hclge_get_mdix_mode,
12961         .enable_vlan_filter = hclge_enable_vlan_filter,
12962         .set_vlan_filter = hclge_set_vlan_filter,
12963         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12964         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12965         .reset_event = hclge_reset_event,
12966         .get_reset_level = hclge_get_reset_level,
12967         .set_default_reset_request = hclge_set_def_reset_request,
12968         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12969         .set_channels = hclge_set_channels,
12970         .get_channels = hclge_get_channels,
12971         .get_regs_len = hclge_get_regs_len,
12972         .get_regs = hclge_get_regs,
12973         .set_led_id = hclge_set_led_id,
12974         .get_link_mode = hclge_get_link_mode,
12975         .add_fd_entry = hclge_add_fd_entry,
12976         .del_fd_entry = hclge_del_fd_entry,
12977         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12978         .get_fd_rule_info = hclge_get_fd_rule_info,
12979         .get_fd_all_rules = hclge_get_all_rules,
12980         .enable_fd = hclge_enable_fd,
12981         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12982         .dbg_read_cmd = hclge_dbg_read_cmd,
12983         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12984         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12985         .ae_dev_resetting = hclge_ae_dev_resetting,
12986         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12987         .set_gro_en = hclge_gro_en,
12988         .get_global_queue_id = hclge_covert_handle_qid_global,
12989         .set_timer_task = hclge_set_timer_task,
12990         .mac_connect_phy = hclge_mac_connect_phy,
12991         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12992         .get_vf_config = hclge_get_vf_config,
12993         .set_vf_link_state = hclge_set_vf_link_state,
12994         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12995         .set_vf_trust = hclge_set_vf_trust,
12996         .set_vf_rate = hclge_set_vf_rate,
12997         .set_vf_mac = hclge_set_vf_mac,
12998         .get_module_eeprom = hclge_get_module_eeprom,
12999         .get_cmdq_stat = hclge_get_cmdq_stat,
13000         .add_cls_flower = hclge_add_cls_flower,
13001         .del_cls_flower = hclge_del_cls_flower,
13002         .cls_flower_active = hclge_is_cls_flower_active,
13003         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13004         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13005         .set_tx_hwts_info = hclge_ptp_set_tx_info,
13006         .get_rx_hwts = hclge_ptp_get_rx_hwts,
13007         .get_ts_info = hclge_ptp_get_ts_info,
13008         .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13009 };
13010
13011 static struct hnae3_ae_algo ae_algo = {
13012         .ops = &hclge_ops,
13013         .pdev_id_table = ae_algo_pci_tbl,
13014 };
13015
13016 static int hclge_init(void)
13017 {
13018         pr_info("%s is initializing\n", HCLGE_NAME);
13019
13020         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13021         if (!hclge_wq) {
13022                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13023                 return -ENOMEM;
13024         }
13025
13026         hnae3_register_ae_algo(&ae_algo);
13027
13028         return 0;
13029 }
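
/* Note (editorial, not upstream): the workqueue is created before the
 * algorithm is registered because registration may probe matching devices
 * right away, and those devices queue their service tasks on hclge_wq;
 * hclge_exit() below tears the two down in the reverse order.
 */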
13030
13031 static void hclge_exit(void)
13032 {
13033         hnae3_unregister_ae_algo(&ae_algo);
13034         destroy_workqueue(hclge_wq);
13035 }
13036 module_init(hclge_init);
13037 module_exit(hclge_exit);
13038
13039 MODULE_LICENSE("GPL");
13040 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13041 MODULE_DESCRIPTION("HCLGE Driver");
13042 MODULE_VERSION(HCLGE_MOD_VERSION);